diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5efee0b --- /dev/null +++ b/.gitignore @@ -0,0 +1,44 @@ +#bin +#obj +csx +.vs +edge +#Publish + +*.user +*.suo +*.cscfg +*.Cache +project.lock.json + +#/packages +/TestResults + +/tools/NuGet.exe +/App_Data +/secrets +/data +.secrets +appsettings.json +local.settings.json + +#node_modules — need this +#dist + +# Local python packages +.python_packages/ + +# Python Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +.DS_Store \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..4a6862a --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# terraform-azure-ballroom +Source code for serverless functions on Azure \ No newline at end of file diff --git a/main.tf b/main.tf new file mode 100644 index 0000000..46d7a02 --- /dev/null +++ b/main.tf @@ -0,0 +1,9 @@ +data "archive_file" "code_package" { + type = "zip" + source_dir = "${path.module}/src" + output_path = "${path.module}/dist/server.zip" +} + +output "output_path" { + value = data.archive_file.code_package.output_path +} \ No newline at end of file diff --git a/src/api/function.json b/src/api/function.json new file mode 100644 index 0000000..05cd439 --- /dev/null +++ b/src/api/function.json @@ -0,0 +1,23 @@ +{ + "bindings": [ + { + "authLevel": "anonymous", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post", + "delete", + "patch" + ], + "route": "api/{action}/{id?}" + }, + { + "type": "http", + "direction": "out", + "name": "res" + } + ], + "disabled": false + } \ No newline at end of file diff --git a/src/api/index.js b/src/api/index.js new file mode 100644 index 0000000..0e85001 --- /dev/null +++ b/src/api/index.js @@ -0,0 +1,152 @@ +var azure = require('azure-storage'); +var entGen = azure.TableUtilities.entityGenerator; +var tableService = 
azure.createTableService(process.env.TABLES_CONNECTION_STRING); +const tableName = "tweets" +const uuidv4 = require('uuid/v4') + +// using table storage as a kind of "serverless" NoSQL database +module.exports = function (context, req) { + if (req.params.action === "tweet") { + handleTweet(context) + } else { + context.res = { + status: 404 + } + } +}; + +async function handleTweet(context) { + tableService.createTableIfNotExists(tableName, function (error, result, response) { + if (!error) { + switch (context.req.method) { + case "POST": + createTweet(context) + break + case "GET": + readTweet(context) + break + case "PATCH": + updateTweet(context) + break + case "DELETE": + deleteTweet(context) + break + } + } + }); +} + +function createTweet(context) { + let message = context.req.body.message + let name = context.req.body.name + let entity = { + PartitionKey: entGen.String(name), + RowKey: entGen.String(uuidv4()), + message: entGen.String(message), + }; + tableService.insertEntity(tableName, entity, function (error, result, response) { + if (!error) { + context.res = { + status: 201 + } + } else { + context.log(error) + context.res = { + status: 400 + } + } + context.done() + }); +} + +function transformTweet(record) { + return { + uuid: record.RowKey._, + name: record.PartitionKey._, + message: record.message._, + timestamp: record.Timestamp._ + } +} + +function readTweet(context) { + let uuid = context.req.params.id + let list = uuid === undefined + if (list) { + tableService.queryEntities(tableName, null, null, function (error, result) { + if (!error) { + let tweets = result.entries.map(function(e){return transformTweet(e)}) + context.res = { + headers: {"Content-Type":"text/json"}, + status: 200, + body: JSON.stringify(tweets) + } + } else { + context.res = { + status: 500 + } + } + context.done() + return + }) + } else{ + let name = context.req.query.name + tableService.retrieveEntity(tableName, name, uuid, function (error, result, response) { + if (!error) 
{ + let tweet = transformTweet(result) + context.res = { + headers: {"Content-Type":"text/json"}, + status: 200, + body: JSON.stringify(tweet) + } + } else { + context.res = { + status: 404 + } + } + context.done() + }); + } + +} + +function updateTweet(context) { + let uuid = context.req.params.id + let entity = { + PartitionKey: entGen.String(context.req.body.name), + RowKey: entGen.String(context.req.body.uuid), + message: entGen.String(context.req.body.message) + }; + tableService.insertOrReplaceEntity(tableName, entity, function (error, result, response) { + if (!error) { + context.res = { + status: 202 + } + } else { + context.res = { + status: 400 + } + } + context.done() + }); +} + +function deleteTweet(context) { + let uuid = context.req.params.id + let name = context.req.query.name + var entity = { + PartitionKey: entGen.String(name), + RowKey: entGen.String(uuid), + }; + tableService.deleteEntity(tableName, entity, function (error, result, response) { + if (!error) { + context.res = { + status: 202 + } + } else { + context.res = { + status: 404 + } + } + context.done() + }); +} \ No newline at end of file diff --git a/src/host.json b/src/host.json new file mode 100755 index 0000000..b7f2a06 --- /dev/null +++ b/src/host.json @@ -0,0 +1,8 @@ +{ + "version": "2.0", + "extensions": { + "http": { + "routePrefix": "" + } +} +} \ No newline at end of file diff --git a/src/node_modules/.bin/sshpk-conv b/src/node_modules/.bin/sshpk-conv new file mode 120000 index 0000000..a2a295c --- /dev/null +++ b/src/node_modules/.bin/sshpk-conv @@ -0,0 +1 @@ +../sshpk/bin/sshpk-conv \ No newline at end of file diff --git a/src/node_modules/.bin/sshpk-sign b/src/node_modules/.bin/sshpk-sign new file mode 120000 index 0000000..766b9b3 --- /dev/null +++ b/src/node_modules/.bin/sshpk-sign @@ -0,0 +1 @@ +../sshpk/bin/sshpk-sign \ No newline at end of file diff --git a/src/node_modules/.bin/sshpk-verify b/src/node_modules/.bin/sshpk-verify new file mode 120000 index 
0000000..bfd7e3a --- /dev/null +++ b/src/node_modules/.bin/sshpk-verify @@ -0,0 +1 @@ +../sshpk/bin/sshpk-verify \ No newline at end of file diff --git a/src/node_modules/.bin/uuid b/src/node_modules/.bin/uuid new file mode 120000 index 0000000..b3e45bc --- /dev/null +++ b/src/node_modules/.bin/uuid @@ -0,0 +1 @@ +../uuid/bin/uuid \ No newline at end of file diff --git a/src/node_modules/ajv/.tonic_example.js b/src/node_modules/ajv/.tonic_example.js new file mode 100644 index 0000000..aa11812 --- /dev/null +++ b/src/node_modules/ajv/.tonic_example.js @@ -0,0 +1,20 @@ +var Ajv = require('ajv'); +var ajv = new Ajv({allErrors: true}); + +var schema = { + "properties": { + "foo": { "type": "string" }, + "bar": { "type": "number", "maximum": 3 } + } +}; + +var validate = ajv.compile(schema); + +test({"foo": "abc", "bar": 2}); +test({"foo": 2, "bar": 4}); + +function test(data) { + var valid = validate(data); + if (valid) console.log('Valid!'); + else console.log('Invalid: ' + ajv.errorsText(validate.errors)); +} \ No newline at end of file diff --git a/src/node_modules/ajv/LICENSE b/src/node_modules/ajv/LICENSE new file mode 100644 index 0000000..96ee719 --- /dev/null +++ b/src/node_modules/ajv/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Evgeny Poberezkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/src/node_modules/ajv/README.md b/src/node_modules/ajv/README.md new file mode 100644 index 0000000..c858efd --- /dev/null +++ b/src/node_modules/ajv/README.md @@ -0,0 +1,1344 @@ +Ajv logo + +# Ajv: Another JSON Schema Validator + +The fastest JSON Schema validator for Node.js and browser. Supports draft-04/06/07. + +[![Build Status](https://travis-ci.org/epoberezkin/ajv.svg?branch=master)](https://travis-ci.org/epoberezkin/ajv) +[![npm](https://img.shields.io/npm/v/ajv.svg)](https://www.npmjs.com/package/ajv) +[![npm downloads](https://img.shields.io/npm/dm/ajv.svg)](https://www.npmjs.com/package/ajv) +[![Coverage Status](https://coveralls.io/repos/epoberezkin/ajv/badge.svg?branch=master&service=github)](https://coveralls.io/github/epoberezkin/ajv?branch=master) +[![Greenkeeper badge](https://badges.greenkeeper.io/epoberezkin/ajv.svg)](https://greenkeeper.io/) +[![Gitter](https://img.shields.io/gitter/room/ajv-validator/ajv.svg)](https://gitter.im/ajv-validator/ajv) + +### _Ajv and [related repositories](#related-packages) will be transfered to [ajv-validator](https://github.com/ajv-validator) org_ + +## Using version 6 + +[JSON Schema draft-07](http://json-schema.org/latest/json-schema-validation.html) is published. + +[Ajv version 6.0.0](https://github.com/epoberezkin/ajv/releases/tag/v6.0.0) that supports draft-07 is released. 
It may require either migrating your schemas or updating your code (to continue using draft-04 and v5 schemas, draft-06 schemas will be supported without changes). + +__Please note__: To use Ajv with draft-06 schemas you need to explicitly add the meta-schema to the validator instance: + +```javascript +ajv.addMetaSchema(require('ajv/lib/refs/json-schema-draft-06.json')); +``` + +To use Ajv with draft-04 schemas in addition to explicitly adding meta-schema you also need to use option schemaId: + +```javascript +var ajv = new Ajv({schemaId: 'id'}); +// If you want to use both draft-04 and draft-06/07 schemas: +// var ajv = new Ajv({schemaId: 'auto'}); +ajv.addMetaSchema(require('ajv/lib/refs/json-schema-draft-04.json')); +``` + + +## Contents + +- [Performance](#performance) +- [Features](#features) +- [Getting started](#getting-started) +- [Frequently Asked Questions](https://github.com/epoberezkin/ajv/blob/master/FAQ.md) +- [Using in browser](#using-in-browser) +- [Command line interface](#command-line-interface) +- Validation + - [Keywords](#validation-keywords) + - [Annotation keywords](#annotation-keywords) + - [Formats](#formats) + - [Combining schemas with $ref](#ref) + - [$data reference](#data-reference) + - NEW: [$merge and $patch keywords](#merge-and-patch-keywords) + - [Defining custom keywords](#defining-custom-keywords) + - [Asynchronous schema compilation](#asynchronous-schema-compilation) + - [Asynchronous validation](#asynchronous-validation) + - [Security considerations](#security-considerations) +- Modifying data during validation + - [Filtering data](#filtering-data) + - [Assigning defaults](#assigning-defaults) + - [Coercing data types](#coercing-data-types) +- API + - [Methods](#api) + - [Options](#options) + - [Validation errors](#validation-errors) +- [Plugins](#plugins) +- [Related packages](#related-packages) +- [Some packages using Ajv](#some-packages-using-ajv) +- [Tests, Contributing, History, License](#tests) + + +## Performance + +Ajv 
generates code using [doT templates](https://github.com/olado/doT) to turn JSON Schemas into super-fast validation functions that are efficient for v8 optimization. + +Currently Ajv is the fastest and the most standard compliant validator according to these benchmarks: + +- [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark) - 50% faster than the second place +- [jsck benchmark](https://github.com/pandastrike/jsck#benchmarks) - 20-190% faster +- [z-schema benchmark](https://rawgit.com/zaggino/z-schema/master/benchmark/results.html) +- [themis benchmark](https://cdn.rawgit.com/playlyfe/themis/master/benchmark/results.html) + + +Performance of different validators by [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark): + +[![performance](https://chart.googleapis.com/chart?chxt=x,y&cht=bhs&chco=76A4FB&chls=2.0&chbh=32,4,1&chs=600x416&chxl=-1:|djv|ajv|json-schema-validator-generator|jsen|is-my-json-valid|themis|z-schema|jsck|skeemas|json-schema-library|tv4&chd=t:100,98,72.1,66.8,50.1,15.1,6.1,3.8,1.2,0.7,0.2)](https://github.com/ebdrup/json-schema-benchmark/blob/master/README.md#performance) + + +## Features + +- Ajv implements full JSON Schema [draft-06/07](http://json-schema.org/) and draft-04 standards: + - all validation keywords (see [JSON Schema validation keywords](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md)) + - full support of remote refs (remote schemas have to be added with `addSchema` or compiled to be available) + - support of circular references between schemas + - correct string lengths for strings with unicode pairs (can be turned off) + - [formats](#formats) defined by JSON Schema draft-07 standard and custom formats (can be turned off) + - [validates schemas against meta-schema](#api-validateschema) +- supports [browsers](#using-in-browser) and Node.js 0.10-8.x +- [asynchronous loading](#asynchronous-schema-compilation) of referenced schemas during compilation +- "All errors" validation mode 
with [option allErrors](#options) +- [error messages with parameters](#validation-errors) describing error reasons to allow creating custom error messages +- i18n error messages support with [ajv-i18n](https://github.com/epoberezkin/ajv-i18n) package +- [filtering data](#filtering-data) from additional properties +- [assigning defaults](#assigning-defaults) to missing properties and items +- [coercing data](#coercing-data-types) to the types specified in `type` keywords +- [custom keywords](#defining-custom-keywords) +- draft-06/07 keywords `const`, `contains`, `propertyNames` and `if/then/else` +- draft-06 boolean schemas (`true`/`false` as a schema to always pass/fail). +- keywords `switch`, `patternRequired`, `formatMaximum` / `formatMinimum` and `formatExclusiveMaximum` / `formatExclusiveMinimum` from [JSON Schema extension proposals](https://github.com/json-schema/json-schema/wiki/v5-Proposals) with [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package +- [$data reference](#data-reference) to use values from the validated data as values for the schema keywords +- [asynchronous validation](#asynchronous-validation) of custom formats and keywords + +Currently Ajv is the only validator that passes all the tests from [JSON Schema Test Suite](https://github.com/json-schema/JSON-Schema-Test-Suite) (according to [json-schema-benchmark](https://github.com/ebdrup/json-schema-benchmark), apart from the test that requires that `1.0` is not an integer that is impossible to satisfy in JavaScript). + + +## Install + +``` +npm install ajv +``` + + +## Getting started + +Try it in the Node.js REPL: https://tonicdev.com/npm/ajv + + +The fastest validation call: + +```javascript +var Ajv = require('ajv'); +var ajv = new Ajv(); // options can be passed, e.g. {allErrors: true} +var validate = ajv.compile(schema); +var valid = validate(data); +if (!valid) console.log(validate.errors); +``` + +or with less code + +```javascript +// ... 
+var valid = ajv.validate(schema, data); +if (!valid) console.log(ajv.errors); +// ... +``` + +or + +```javascript +// ... +var valid = ajv.addSchema(schema, 'mySchema') + .validate('mySchema', data); +if (!valid) console.log(ajv.errorsText()); +// ... +``` + +See [API](#api) and [Options](#options) for more details. + +Ajv compiles schemas to functions and caches them in all cases (using schema serialized with [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) or a custom function as a key), so that the next time the same schema is used (not necessarily the same object instance) it won't be compiled again. + +The best performance is achieved when using compiled functions returned by `compile` or `getSchema` methods (there is no additional function call). + +__Please note__: every time a validation function or `ajv.validate` are called `errors` property is overwritten. You need to copy `errors` array reference to another variable if you want to use it later (e.g., in the callback). See [Validation errors](#validation-errors) + + +## Using in browser + +You can require Ajv directly from the code you browserify - in this case Ajv will be a part of your bundle. + +If you need to use Ajv in several bundles you can create a separate UMD bundle using `npm run bundle` script (thanks to [siddo420](https://github.com/siddo420)). + +Then you need to load Ajv in the browser: +```html + +``` + +This bundle can be used with different module systems; it creates global `Ajv` if no module system is found. + +The browser bundle is available on [cdnjs](https://cdnjs.com/libraries/ajv). + +Ajv is tested with these browsers: + +[![Sauce Test Status](https://saucelabs.com/browser-matrix/epoberezkin.svg)](https://saucelabs.com/u/epoberezkin) + +__Please note__: some frameworks, e.g. Dojo, may redefine global require in such way that is not compatible with CommonJS module format. 
In such case Ajv bundle has to be loaded before the framework and then you can use global Ajv (see issue [#234](https://github.com/epoberezkin/ajv/issues/234)). + + +## Command line interface + +CLI is available as a separate npm package [ajv-cli](https://github.com/jessedc/ajv-cli). It supports: + +- compiling JSON Schemas to test their validity +- BETA: generating standalone module exporting a validation function to be used without Ajv (using [ajv-pack](https://github.com/epoberezkin/ajv-pack)) +- migrate schemas to draft-07 (using [json-schema-migrate](https://github.com/epoberezkin/json-schema-migrate)) +- validating data file(s) against JSON Schema +- testing expected validity of data against JSON Schema +- referenced schemas +- custom meta-schemas +- files in JSON and JavaScript format +- all Ajv options +- reporting changes in data after validation in [JSON-patch](https://tools.ietf.org/html/rfc6902) format + + +## Validation keywords + +Ajv supports all validation keywords from draft-07 of JSON Schema standard: + +- [type](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#type) +- [for numbers](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#keywords-for-numbers) - maximum, minimum, exclusiveMaximum, exclusiveMinimum, multipleOf +- [for strings](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#keywords-for-strings) - maxLength, minLength, pattern, format +- [for arrays](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#keywords-for-arrays) - maxItems, minItems, uniqueItems, items, additionalItems, [contains](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#contains) +- [for objects](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#keywords-for-objects) - maxProperties, minProperties, required, properties, patternProperties, additionalProperties, dependencies, [propertyNames](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#propertynames) +- [for all 
types](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#keywords-for-all-types) - enum, [const](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#const) +- [compound keywords](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#compound-keywords) - not, oneOf, anyOf, allOf, [if/then/else](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#ifthenelse) + +With [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package Ajv also supports validation keywords from [JSON Schema extension proposals](https://github.com/json-schema/json-schema/wiki/v5-Proposals) for JSON Schema standard: + +- [patternRequired](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#patternrequired-proposed) - like `required` but with patterns that some property should match. +- [formatMaximum, formatMinimum, formatExclusiveMaximum, formatExclusiveMinimum](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md#formatmaximum--formatminimum-and-exclusiveformatmaximum--exclusiveformatminimum-proposed) - setting limits for date, time, etc. + +See [JSON Schema validation keywords](https://github.com/epoberezkin/ajv/blob/master/KEYWORDS.md) for more details. + + +## Annotation keywords + +JSON Schema specification defines several annotation keywords that describe schema itself but do not perform any validation. + +- `title` and `description`: information about the data represented by that schema +- `$comment` (NEW in draft-07): information for developers. With option `$comment` Ajv logs or passes the comment string to the user-supplied function. See [Options](#options). +- `default`: a default value of the data instance, see [Assigning defaults](#assigning-defaults). +- `examples` (NEW in draft-07): an array of data instances. Ajv does not check the validity of these instances against the schema. 
+- `readOnly` and `writeOnly` (NEW in draft-07): marks data-instance as read-only or write-only in relation to the source of the data (database, api, etc.). +- `contentEncoding`: [RFC 2045](https://tools.ietf.org/html/rfc2045#section-6.1 ), e.g., "base64". +- `contentMediaType`: [RFC 2046](https://tools.ietf.org/html/rfc2046), e.g., "image/png". + +__Please note__: Ajv does not implement validation of the keywords `examples`, `contentEncoding` and `contentMediaType` but it reserves them. If you want to create a plugin that implements some of them, it should remove these keywords from the instance. + + +## Formats + +The following formats are supported for string validation with "format" keyword: + +- _date_: full-date according to [RFC3339](http://tools.ietf.org/html/rfc3339#section-5.6). +- _time_: time with optional time-zone. +- _date-time_: date-time from the same source (time-zone is mandatory). `date`, `time` and `date-time` validate ranges in `full` mode and only regexp in `fast` mode (see [options](#options)). +- _uri_: full URI. +- _uri-reference_: URI reference, including full and relative URIs. +- _uri-template_: URI template according to [RFC6570](https://tools.ietf.org/html/rfc6570) +- _url_ (deprecated): [URL record](https://url.spec.whatwg.org/#concept-url). +- _email_: email address. +- _hostname_: host name according to [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5). +- _ipv4_: IP address v4. +- _ipv6_: IP address v6. +- _regex_: tests whether a string is a valid regular expression by passing it to RegExp constructor. +- _uuid_: Universally Unique IDentifier according to [RFC4122](http://tools.ietf.org/html/rfc4122). +- _json-pointer_: JSON-pointer according to [RFC6901](https://tools.ietf.org/html/rfc6901). +- _relative-json-pointer_: relative JSON-pointer according to [this draft](http://tools.ietf.org/html/draft-luff-relative-json-pointer-00). 
+ +__Please note__: JSON Schema draft-07 also defines formats `iri`, `iri-reference`, `idn-hostname` and `idn-email` for URLs, hostnames and emails with international characters. Ajv does not implement these formats. If you create Ajv plugin that implements them please make a PR to mention this plugin here. + +There are two modes of format validation: `fast` and `full`. This mode affects formats `date`, `time`, `date-time`, `uri`, `uri-reference`, `email`, and `hostname`. See [Options](#options) for details. + +You can add additional formats and replace any of the formats above using [addFormat](#api-addformat) method. + +The option `unknownFormats` allows changing the default behaviour when an unknown format is encountered. In this case Ajv can either fail schema compilation (default) or ignore it (default in versions before 5.0.0). You also can whitelist specific format(s) to be ignored. See [Options](#options) for details. + +You can find regular expressions used for format validation and the sources that were used in [formats.js](https://github.com/epoberezkin/ajv/blob/master/lib/compile/formats.js). + + +## Combining schemas with $ref + +You can structure your validation logic across multiple schema files and have schemas reference each other using `$ref` keyword. 
+ +Example: + +```javascript +var schema = { + "$id": "http://example.com/schemas/schema.json", + "type": "object", + "properties": { + "foo": { "$ref": "defs.json#/definitions/int" }, + "bar": { "$ref": "defs.json#/definitions/str" } + } +}; + +var defsSchema = { + "$id": "http://example.com/schemas/defs.json", + "definitions": { + "int": { "type": "integer" }, + "str": { "type": "string" } + } +}; +``` + +Now to compile your schema you can either pass all schemas to Ajv instance: + +```javascript +var ajv = new Ajv({schemas: [schema, defsSchema]}); +var validate = ajv.getSchema('http://example.com/schemas/schema.json'); +``` + +or use `addSchema` method: + +```javascript +var ajv = new Ajv; +var validate = ajv.addSchema(defsSchema) + .compile(schema); +``` + +See [Options](#options) and [addSchema](#api) method. + +__Please note__: +- `$ref` is resolved as the uri-reference using schema $id as the base URI (see the example). +- References can be recursive (and mutually recursive) to implement the schemas for different data structures (such as linked lists, trees, graphs, etc.). +- You don't have to host your schema files at the URIs that you use as schema $id. These URIs are only used to identify the schemas, and according to JSON Schema specification validators should not expect to be able to download the schemas from these URIs. +- The actual location of the schema file in the file system is not used. +- You can pass the identifier of the schema as the second parameter of `addSchema` method or as a property name in `schemas` option. This identifier can be used instead of (or in addition to) schema $id. +- You cannot have the same $id (or the schema identifier) used for more than one schema - the exception will be thrown. +- You can implement dynamic resolution of the referenced schemas using `compileAsync` method. In this way you can store schemas in any system (files, web, database, etc.) and reference them without explicitly adding to Ajv instance. 
See [Asynchronous schema compilation](#asynchronous-schema-compilation). + + +## $data reference + +With `$data` option you can use values from the validated data as the values for the schema keywords. See [proposal](https://github.com/json-schema/json-schema/wiki/$data-(v5-proposal)) for more information about how it works. + +`$data` reference is supported in the keywords: const, enum, format, maximum/minimum, exclusiveMaximum / exclusiveMinimum, maxLength / minLength, maxItems / minItems, maxProperties / minProperties, formatMaximum / formatMinimum, formatExclusiveMaximum / formatExclusiveMinimum, multipleOf, pattern, required, uniqueItems. + +The value of "$data" should be a [JSON-pointer](https://tools.ietf.org/html/rfc6901) to the data (the root is always the top level data object, even if the $data reference is inside a referenced subschema) or a [relative JSON-pointer](http://tools.ietf.org/html/draft-luff-relative-json-pointer-00) (it is relative to the current point in data; if the $data reference is inside a referenced subschema it cannot point to the data outside of the root level for this subschema). + +Examples. + +This schema requires that the value in property `smaller` is less or equal than the value in the property larger: + +```javascript +var ajv = new Ajv({$data: true}); + +var schema = { + "properties": { + "smaller": { + "type": "number", + "maximum": { "$data": "1/larger" } + }, + "larger": { "type": "number" } + } +}; + +var validData = { + smaller: 5, + larger: 7 +}; + +ajv.validate(schema, validData); // true +``` + +This schema requires that the properties have the same format as their field names: + +```javascript +var schema = { + "additionalProperties": { + "type": "string", + "format": { "$data": "0#" } + } +}; + +var validData = { + 'date-time': '1963-06-19T08:30:06.283185Z', + email: 'joe.bloggs@example.com' +} +``` + +`$data` reference is resolved safely - it won't throw even if some property is undefined. 
If `$data` resolves to `undefined` the validation succeeds (with the exclusion of `const` keyword). If `$data` resolves to incorrect type (e.g. not "number" for maximum keyword) the validation fails. + + +## $merge and $patch keywords + +With the package [ajv-merge-patch](https://github.com/epoberezkin/ajv-merge-patch) you can use the keywords `$merge` and `$patch` that allow extending JSON Schemas with patches using formats [JSON Merge Patch (RFC 7396)](https://tools.ietf.org/html/rfc7396) and [JSON Patch (RFC 6902)](https://tools.ietf.org/html/rfc6902). + +To add keywords `$merge` and `$patch` to Ajv instance use this code: + +```javascript +require('ajv-merge-patch')(ajv); +``` + +Examples. + +Using `$merge`: + +```json +{ + "$merge": { + "source": { + "type": "object", + "properties": { "p": { "type": "string" } }, + "additionalProperties": false + }, + "with": { + "properties": { "q": { "type": "number" } } + } + } +} +``` + +Using `$patch`: + +```json +{ + "$patch": { + "source": { + "type": "object", + "properties": { "p": { "type": "string" } }, + "additionalProperties": false + }, + "with": [ + { "op": "add", "path": "/properties/q", "value": { "type": "number" } } + ] + } +} +``` + +The schemas above are equivalent to this schema: + +```json +{ + "type": "object", + "properties": { + "p": { "type": "string" }, + "q": { "type": "number" } + }, + "additionalProperties": false +} +``` + +The properties `source` and `with` in the keywords `$merge` and `$patch` can use absolute or relative `$ref` to point to other schemas previously added to the Ajv instance or to the fragments of the current schema. + +See the package [ajv-merge-patch](https://github.com/epoberezkin/ajv-merge-patch) for more information. 
+ + +## Defining custom keywords + +The advantages of using custom keywords are: + +- allow creating validation scenarios that cannot be expressed using JSON Schema +- simplify your schemas +- help bringing a bigger part of the validation logic to your schemas +- make your schemas more expressive, less verbose and closer to your application domain +- implement custom data processors that modify your data (`modifying` option MUST be used in keyword definition) and/or create side effects while the data is being validated + +If a keyword is used only for side-effects and its validation result is pre-defined, use option `valid: true/false` in keyword definition to simplify both generated code (no error handling in case of `valid: true`) and your keyword functions (no need to return any validation result). + +The concerns you have to be aware of when extending JSON Schema standard with custom keywords are the portability and understanding of your schemas. You will have to support these custom keywords on other platforms and to properly document these keywords so that everybody can understand them in your schemas. + +You can define custom keywords with [addKeyword](#api-addkeyword) method. Keywords are defined on the `ajv` instance level - new instances will not have previously defined keywords. + +Ajv allows defining keywords with: +- validation function +- compilation function +- macro function +- inline compilation function that should return code (as string) that will be inlined in the currently compiled schema. + +Example. `range` and `exclusiveRange` keywords using compiled schema: + +```javascript +ajv.addKeyword('range', { + type: 'number', + compile: function (sch, parentSchema) { + var min = sch[0]; + var max = sch[1]; + + return parentSchema.exclusiveRange === true + ? 
function (data) { return data > min && data < max; } + : function (data) { return data >= min && data <= max; } + } +}); + +var schema = { "range": [2, 4], "exclusiveRange": true }; +var validate = ajv.compile(schema); +console.log(validate(2.01)); // true +console.log(validate(3.99)); // true +console.log(validate(2)); // false +console.log(validate(4)); // false +``` + +Several custom keywords (typeof, instanceof, range and propertyNames) are defined in [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package - they can be used for your schemas and as a starting point for your own custom keywords. + +See [Defining custom keywords](https://github.com/epoberezkin/ajv/blob/master/CUSTOM.md) for more details. + + +## Asynchronous schema compilation + +During asynchronous compilation remote references are loaded using supplied function. See `compileAsync` [method](#api-compileAsync) and `loadSchema` [option](#options). + +Example: + +```javascript +var ajv = new Ajv({ loadSchema: loadSchema }); + +ajv.compileAsync(schema).then(function (validate) { + var valid = validate(data); + // ... +}); + +function loadSchema(uri) { + return request.json(uri).then(function (res) { + if (res.statusCode >= 400) + throw new Error('Loading error: ' + res.statusCode); + return res.body; + }); +} +``` + +__Please note__: [Option](#options) `missingRefs` should NOT be set to `"ignore"` or `"fail"` for asynchronous compilation to work. + + +## Asynchronous validation + +Example in Node.js REPL: https://tonicdev.com/esp/ajv-asynchronous-validation + +You can define custom formats and keywords that perform validation asynchronously by accessing database or some other service. You should add `async: true` in the keyword or format definition (see [addFormat](#api-addformat), [addKeyword](#api-addkeyword) and [Defining custom keywords](#defining-custom-keywords)). 
+ +If your schema uses asynchronous formats/keywords or refers to some schema that contains them it should have `"$async": true` keyword so that Ajv can compile it correctly. If asynchronous format/keyword or reference to asynchronous schema is used in the schema without `$async` keyword Ajv will throw an exception during schema compilation. + +__Please note__: all asynchronous subschemas that are referenced from the current or other schemas should have `"$async": true` keyword as well, otherwise the schema compilation will fail. + +Validation function for an asynchronous custom format/keyword should return a promise that resolves with `true` or `false` (or rejects with `new Ajv.ValidationError(errors)` if you want to return custom errors from the keyword function). + +Ajv compiles asynchronous schemas to [es7 async functions](http://tc39.github.io/ecmascript-asyncawait/) that can optionally be transpiled with [nodent](https://github.com/MatAtBread/nodent). Async functions are supported in Node.js 7+ and all modern browsers. You can also supply any other transpiler as a function via `processCode` option. See [Options](#options). + +The compiled validation function has `$async: true` property (if the schema is asynchronous), so you can differentiate these functions if you are using both synchronous and asynchronous schemas. + +Validation result will be a promise that resolves with validated data or rejects with an exception `Ajv.ValidationError` that contains the array of validation errors in `errors` property. 
+ + +Example: + +```javascript +var ajv = new Ajv; +// require('ajv-async')(ajv); + +ajv.addKeyword('idExists', { + async: true, + type: 'number', + validate: checkIdExists +}); + + +function checkIdExists(schema, data) { + return knex(schema.table) + .select('id') + .where('id', data) + .then(function (rows) { + return !!rows.length; // true if record is found + }); +} + +var schema = { + "$async": true, + "properties": { + "userId": { + "type": "integer", + "idExists": { "table": "users" } + }, + "postId": { + "type": "integer", + "idExists": { "table": "posts" } + } + } +}; + +var validate = ajv.compile(schema); + +validate({ userId: 1, postId: 19 }) +.then(function (data) { + console.log('Data is valid', data); // { userId: 1, postId: 19 } +}) +.catch(function (err) { + if (!(err instanceof Ajv.ValidationError)) throw err; + // data is invalid + console.log('Validation errors:', err.errors); +}); +``` + +### Using transpilers with asynchronous validation functions. + +[ajv-async](https://github.com/epoberezkin/ajv-async) uses [nodent](https://github.com/MatAtBread/nodent) to transpile async functions. To use another transpiler you should separately install it (or load its bundle in the browser). + + +#### Using nodent + +```javascript +var ajv = new Ajv; +require('ajv-async')(ajv); +// in the browser if you want to load ajv-async bundle separately you can: +// window.ajvAsync(ajv); +var validate = ajv.compile(schema); // transpiled es7 async function +validate(data).then(successFunc).catch(errorFunc); +``` + + +#### Using other transpilers + +```javascript +var ajv = new Ajv({ processCode: transpileFunc }); +var validate = ajv.compile(schema); // transpiled es7 async function +validate(data).then(successFunc).catch(errorFunc); +``` + +See [Options](#options). + + +## Security considerations + +JSON Schema, if properly used, can replace data sanitisation. It doesn't replace other API security considerations. 
It also introduces additional security aspects to consider. + + +##### Untrusted schemas + +Ajv treats JSON schemas as trusted as your application code. This security model is based on the most common use case, when the schemas are static and bundled together with the application. + +If your schemas are received from untrusted sources (or generated from untrusted data) there are several scenarios you need to prevent: +- compiling schemas can cause stack overflow (if they are too deep) +- compiling schemas can be slow (e.g. [#557](https://github.com/epoberezkin/ajv/issues/557)) +- validating certain data can be slow + +It is difficult to predict all the scenarios, but at the very least it may help to limit the size of untrusted schemas (e.g. limit JSON string length) and also the maximum schema object depth (that can be high for relatively small JSON strings). You also may want to mitigate slow regular expressions in `pattern` and `patternProperties` keywords. + +Regardless the measures you take, using untrusted schemas increases security risks. + + +##### Circular references in JavaScript objects + +Ajv does not support schemas and validated data that have circular references in objects. See [issue #802](https://github.com/epoberezkin/ajv/issues/802). + +An attempt to compile such schemas or validate such data would cause stack overflow (or will not complete in case of asynchronous validation). Depending on the parser you use, untrusted data can lead to circular references. + + +##### Security risks of trusted schemas + +Some keywords in JSON Schemas can lead to very slow validation for certain data. 
These keywords include (but may not be limited to): + +- `pattern` and `format` for large strings - use `maxLength` to mitigate +- `uniqueItems` for large non-scalar arrays - use `maxItems` to mitigate +- `patternProperties` for large property names - use `propertyNames` to mitigate + +__Please note__: The suggestions above to prevent slow validation would only work if you do NOT use `allErrors: true` in production code (using it would continue validation after validation errors). + +You can validate your JSON schemas against [this meta-schema](https://github.com/epoberezkin/ajv/blob/master/lib/refs/json-schema-secure.json) to check that these recommendations are followed: + +```javascript +const isSchemaSecure = ajv.compile(require('ajv/lib/refs/json-schema-secure.json')); + +const schema1 = {format: 'email'}; +isSchemaSecure(schema1); // false + +const schema2 = {format: 'email', maxLength: 256}; +isSchemaSecure(schema2); // true +``` + +__Please note__: following all these recommendations is not a guarantee that validation of untrusted data is safe - it can still lead to some undesirable results. + + +## Filtering data + +With [option `removeAdditional`](#options) (added by [andyscott](https://github.com/andyscott)) you can filter data during the validation. + +This option modifies original data. 
 + +Example: + +```javascript +var ajv = new Ajv({ removeAdditional: true }); +var schema = { + "additionalProperties": false, + "properties": { + "foo": { "type": "number" }, + "bar": { + "additionalProperties": { "type": "number" }, + "properties": { + "baz": { "type": "string" } + } + } + } +} + +var data = { + "foo": 0, + "additional1": 1, // will be removed; `additionalProperties` == false + "bar": { + "baz": "abc", + "additional2": 2 // will NOT be removed; `additionalProperties` != false + }, +} + +var validate = ajv.compile(schema); + +console.log(validate(data)); // true +console.log(data); // { "foo": 0, "bar": { "baz": "abc", "additional2": 2 } } +``` + +If `removeAdditional` option in the example above were `"all"` then both `additional1` and `additional2` properties would have been removed. + +If the option were `"failing"` then property `additional1` would have been removed regardless of its value and property `additional2` would have been removed only if its value were failing the schema in the inner `additionalProperties` (so in the example above it would have stayed because it passes the schema, but any non-number would have been removed). + +__Please note__: If you use `removeAdditional` option with `additionalProperties` keyword inside `anyOf`/`oneOf` keywords your validation can fail with this schema, for example: + +```json +{ + "type": "object", + "oneOf": [ + { + "properties": { + "foo": { "type": "string" } + }, + "required": [ "foo" ], + "additionalProperties": false + }, + { + "properties": { + "bar": { "type": "integer" } + }, + "required": [ "bar" ], + "additionalProperties": false + } + ] +} +``` + +The intention of the schema above is to allow objects with either the string property "foo" or the integer property "bar", but not with both and not with any other properties. + +With the option `removeAdditional: true` the validation will pass for the object `{ "foo": "abc"}` but will fail for the object `{"bar": 1}`. 
It happens because while the first subschema in `oneOf` is validated, the property `bar` is removed because it is an additional property according to the standard (because it is not included in `properties` keyword in the same schema). + +While this behaviour is unexpected (issues [#129](https://github.com/epoberezkin/ajv/issues/129), [#134](https://github.com/epoberezkin/ajv/issues/134)), it is correct. To have the expected behaviour (both objects are allowed and additional properties are removed) the schema has to be refactored in this way: + +```json +{ + "type": "object", + "properties": { + "foo": { "type": "string" }, + "bar": { "type": "integer" } + }, + "additionalProperties": false, + "oneOf": [ + { "required": [ "foo" ] }, + { "required": [ "bar" ] } + ] +} +``` + +The schema above is also more efficient - it will compile into a faster function. + + +## Assigning defaults + +With [option `useDefaults`](#options) Ajv will assign values from `default` keyword in the schemas of `properties` and `items` (when it is the array of schemas) to the missing properties and items. + +With the option value `"empty"` properties and items equal to `null` or `""` (empty string) will be considered missing and assigned defaults. + +This option modifies original data. + +__Please note__: the default value is inserted in the generated validation code as a literal, so the value inserted in the data will be the deep clone of the default in the schema. 
+ + +Example 1 (`default` in `properties`): + +```javascript +var ajv = new Ajv({ useDefaults: true }); +var schema = { + "type": "object", + "properties": { + "foo": { "type": "number" }, + "bar": { "type": "string", "default": "baz" } + }, + "required": [ "foo", "bar" ] +}; + +var data = { "foo": 1 }; + +var validate = ajv.compile(schema); + +console.log(validate(data)); // true +console.log(data); // { "foo": 1, "bar": "baz" } +``` + +Example 2 (`default` in `items`): + +```javascript +var schema = { + "type": "array", + "items": [ + { "type": "number" }, + { "type": "string", "default": "foo" } + ] +} + +var data = [ 1 ]; + +var validate = ajv.compile(schema); + +console.log(validate(data)); // true +console.log(data); // [ 1, "foo" ] +``` + +`default` keywords in other cases are ignored: + +- not in `properties` or `items` subschemas +- in schemas inside `anyOf`, `oneOf` and `not` (see [#42](https://github.com/epoberezkin/ajv/issues/42)) +- in `if` subschema of `switch` keyword +- in schemas generated by custom macro keywords + +The [`strictDefaults` option](#options) customizes Ajv's behavior for the defaults that Ajv ignores (`true` raises an error, and `"log"` outputs a warning). + + +## Coercing data types + +When you are validating user inputs all your data properties are usually strings. The option `coerceTypes` allows you to have your data types coerced to the types specified in your schema `type` keywords, both to pass the validation and to use the correctly typed data afterwards. + +This option modifies original data. + +__Please note__: if you pass a scalar value to the validating function its type will be coerced and it will pass the validation, but the value of the variable you pass won't be updated because scalars are passed by value. 
+ + +Example 1: + +```javascript +var ajv = new Ajv({ coerceTypes: true }); +var schema = { + "type": "object", + "properties": { + "foo": { "type": "number" }, + "bar": { "type": "boolean" } + }, + "required": [ "foo", "bar" ] +}; + +var data = { "foo": "1", "bar": "false" }; + +var validate = ajv.compile(schema); + +console.log(validate(data)); // true +console.log(data); // { "foo": 1, "bar": false } +``` + +Example 2 (array coercions): + +```javascript +var ajv = new Ajv({ coerceTypes: 'array' }); +var schema = { + "properties": { + "foo": { "type": "array", "items": { "type": "number" } }, + "bar": { "type": "boolean" } + } +}; + +var data = { "foo": "1", "bar": ["false"] }; + +var validate = ajv.compile(schema); + +console.log(validate(data)); // true +console.log(data); // { "foo": [1], "bar": false } +``` + +The coercion rules, as you can see from the example, are different from JavaScript both to validate user input as expected and to have the coercion reversible (to correctly validate cases where different types are defined in subschemas of "anyOf" and other compound keywords). + +See [Coercion rules](https://github.com/epoberezkin/ajv/blob/master/COERCION.md) for details. + + +## API + +##### new Ajv(Object options) -> Object + +Create Ajv instance. + + +##### .compile(Object schema) -> Function<Object data> + +Generate validating function and cache the compiled schema for future use. + +Validating function returns a boolean value. This function has properties `errors` and `schema`. Errors encountered during the last validation are assigned to `errors` property (it is assigned `null` if there was no errors). `schema` property contains the reference to the original schema. + +The schema passed to this method will be validated against meta-schema unless `validateSchema` option is false. If schema is invalid, an error will be thrown. See [options](#options). 
+ + +##### .compileAsync(Object schema [, Boolean meta] [, Function callback]) -> Promise + +Asynchronous version of `compile` method that loads missing remote schemas using asynchronous function in `options.loadSchema`. This function returns a Promise that resolves to a validation function. An optional callback passed to `compileAsync` will be called with 2 parameters: error (or null) and validating function. The returned promise will reject (and the callback will be called with an error) when: + +- missing schema can't be loaded (`loadSchema` returns a Promise that rejects). +- a schema containing a missing reference is loaded, but the reference cannot be resolved. +- schema (or some loaded/referenced schema) is invalid. + +The function compiles schema and loads the first missing schema (or meta-schema) until all missing schemas are loaded. + +You can asynchronously compile meta-schema by passing `true` as the second parameter. + +See example in [Asynchronous compilation](#asynchronous-schema-compilation). + + +##### .validate(Object schema|String key|String ref, data) -> Boolean + +Validate data using passed schema (it will be compiled and cached). + +Instead of the schema you can use the key that was previously passed to `addSchema`, the schema id if it was present in the schema or any previously resolved reference. + +Validation errors will be available in the `errors` property of Ajv instance (`null` if there were no errors). + +__Please note__: every time this method is called the errors are overwritten so you need to copy them to another variable if you want to use them later. + +If the schema is asynchronous (has `$async` keyword on the top level) this method returns a Promise. See [Asynchronous validation](#asynchronous-validation). + + +##### .addSchema(Array<Object>|Object schema [, String key]) -> Ajv + +Add schema(s) to validator instance. This method does not compile schemas (but it still validates them). 
Because of that dependencies can be added in any order and circular dependencies are supported. It also prevents unnecessary compilation of schemas that are containers for other schemas but not used as a whole. + +Array of schemas can be passed (schemas should have ids), the second parameter will be ignored. + +Key can be passed that can be used to reference the schema and will be used as the schema id if there is no id inside the schema. If the key is not passed, the schema id will be used as the key. + + +Once the schema is added, it (and all the references inside it) can be referenced in other schemas and used to validate data. + +Although `addSchema` does not compile schemas, explicit compilation is not required - the schema will be compiled when it is used first time. + +By default the schema is validated against meta-schema before it is added, and if the schema does not pass validation the exception is thrown. This behaviour is controlled by `validateSchema` option. + +__Please note__: Ajv uses the [method chaining syntax](https://en.wikipedia.org/wiki/Method_chaining) for all methods with the prefix `add*` and `remove*`. +This allows you to do nice things like the following. + +```javascript +var validate = new Ajv().addSchema(schema).addFormat(name, regex).getSchema(uri); +``` + +##### .addMetaSchema(Array<Object>|Object schema [, String key]) -> Ajv + +Adds meta schema(s) that can be used to validate other schemas. That function should be used instead of `addSchema` because there may be instance options that would compile a meta schema incorrectly (at the moment it is `removeAdditional` option). + +There is no need to explicitly add draft-07 meta schema (http://json-schema.org/draft-07/schema) - it is added by default, unless option `meta` is set to `false`. You only need to use it if you have a changed meta-schema that you want to use to validate your schemas. See `validateSchema`. + + +##### .validateSchema(Object schema) -> Boolean + +Validates schema. 
This method should be used to validate schemas rather than `validate` due to the inconsistency of `uri` format in JSON Schema standard. + +By default this method is called automatically when the schema is added, so you rarely need to use it directly. + +If schema doesn't have `$schema` property, it is validated against draft-07 meta-schema (option `meta` should not be false). + +If schema has `$schema` property, then the schema with this id (that should be previously added) is used to validate passed schema. + +Errors will be available at `ajv.errors`. + + +##### .getSchema(String key) -> Function&lt;Object data&gt; + +Retrieve compiled schema previously added with `addSchema` by the key passed to `addSchema` or by its full reference (id). The returned validating function has `schema` property with the reference to the original schema. + + +##### .removeSchema([Object schema|String key|String ref|RegExp pattern]) -> Ajv + +Remove added/cached schema. Even if schema is referenced by other schemas it can be safely removed as dependent schemas have local references. + +Schema can be removed using: +- key passed to `addSchema` +- its full reference (id) +- RegExp that should match schema id or key (meta-schemas won't be removed) +- actual schema object that will be stable-stringified to remove schema from cache + +If no parameter is passed all schemas but meta-schemas will be removed and the cache will be cleared. + + +##### .addFormat(String name, String|RegExp|Function|Object format) -> Ajv + +Add custom format to validate strings or numbers. It can also be used to replace pre-defined formats for Ajv instance. + +Strings are converted to RegExp. + +Function should return validation result as `true` or `false`. + +If object is passed it should have properties `validate`, `compare` and `async`: + +- _validate_: a string, RegExp or a function as described above. 
+- _compare_: an optional comparison function that accepts two strings and compares them according to the format meaning. This function is used with keywords `formatMaximum`/`formatMinimum` (defined in [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) package). It should return `1` if the first value is bigger than the second value, `-1` if it is smaller and `0` if it is equal. +- _async_: an optional `true` value if `validate` is an asynchronous function; in this case it should return a promise that resolves with a value `true` or `false`. +- _type_: an optional type of data that the format applies to. It can be `"string"` (default) or `"number"` (see https://github.com/epoberezkin/ajv/issues/291#issuecomment-259923858). If the type of data is different, the validation will pass. + +Custom formats can be also added via `formats` option. + + +##### .addKeyword(String keyword, Object definition) -> Ajv + +Add custom validation keyword to Ajv instance. + +Keyword should be different from all standard JSON Schema keywords and different from previously defined keywords. There is no way to redefine keywords or to remove keyword definition from the instance. + +Keyword must start with a letter, `_` or `$`, and may continue with letters, numbers, `_`, `$`, or `-`. +It is recommended to use an application-specific prefix for keywords to avoid current and future name collisions. + +Example Keywords: +- `"xyz-example"`: valid, and uses prefix for the xyz project to avoid name collisions. +- `"example"`: valid, but not recommended as it could collide with future versions of JSON Schema etc. +- `"3-example"`: invalid as numbers are not allowed to be the first character in a keyword + +Keyword definition is an object with the following properties: + +- _type_: optional string or array of strings with data type(s) that the keyword applies to. If not present, the keyword will apply to all types. 
+- _validate_: validating function +- _compile_: compiling function +- _macro_: macro function +- _inline_: compiling function that returns code (as string) +- _schema_: an optional `false` value used with "validate" keyword to not pass schema +- _metaSchema_: an optional meta-schema for keyword schema +- _dependencies_: an optional list of properties that must be present in the parent schema - it will be checked during schema compilation +- _modifying_: `true` MUST be passed if keyword modifies data +- _statements_: `true` can be passed in case inline keyword generates statements (as opposed to expression) +- _valid_: pass `true`/`false` to pre-define validation result, the result returned from validation function will be ignored. This option cannot be used with macro keywords. +- _$data_: an optional `true` value to support [$data reference](#data-reference) as the value of custom keyword. The reference will be resolved at validation time. If the keyword has meta-schema it would be extended to allow $data and it will be used to validate the resolved value. Supporting $data reference requires that keyword has validating function (as the only option or in addition to compile, macro or inline function). +- _async_: an optional `true` value if the validation function is asynchronous (whether it is compiled or passed in _validate_ property); in this case it should return a promise that resolves with a value `true` or `false`. This option is ignored in case of "macro" and "inline" keywords. +- _errors_: an optional boolean or string `"full"` indicating whether keyword returns errors. If this property is not set Ajv will determine if the errors were set in case of failed validation. + +_compile_, _macro_ and _inline_ are mutually exclusive, only one should be used at a time. _validate_ can be used separately or in addition to them to support $data reference. 
+ +__Please note__: If the keyword is validating data type that is different from the type(s) in its definition, the validation function will not be called (and expanded macro will not be used), so there is no need to check for data type inside validation function or inside schema returned by macro function (unless you want to enforce a specific type and for some reason do not want to use a separate `type` keyword for that). In the same way as standard keywords work, if the keyword does not apply to the data type being validated, the validation of this keyword will succeed. + +See [Defining custom keywords](#defining-custom-keywords) for more details. + + +##### .getKeyword(String keyword) -> Object|Boolean + +Returns custom keyword definition, `true` for pre-defined keywords and `false` if the keyword is unknown. + + +##### .removeKeyword(String keyword) -> Ajv + +Removes custom or pre-defined keyword so you can redefine them. + +While this method can be used to extend pre-defined keywords, it can also be used to completely change their meaning - it may lead to unexpected results. + +__Please note__: schemas compiled before the keyword is removed will continue to work without changes. To recompile schemas use `removeSchema` method and compile them again. + + +##### .errorsText([Array<Object> errors [, Object options]]) -> String + +Returns the text with all errors in a String. + +Options can have properties `separator` (string used to separate errors, ", " by default) and `dataVar` (the variable name that dataPaths are prefixed with, "data" by default). 
+ + +## Options + +Defaults: + +```javascript +{ + // validation and reporting options: + $data: false, + allErrors: false, + verbose: false, + $comment: false, // NEW in Ajv version 6.0 + jsonPointers: false, + uniqueItems: true, + unicode: true, + nullable: false, + format: 'fast', + formats: {}, + unknownFormats: true, + schemas: {}, + logger: undefined, + // referenced schema options: + schemaId: '$id', + missingRefs: true, + extendRefs: 'ignore', // recommended 'fail' + loadSchema: undefined, // function(uri: string): Promise {} + // options to modify validated data: + removeAdditional: false, + useDefaults: false, + coerceTypes: false, + // strict mode options + strictDefaults: false, + strictKeywords: false, + // asynchronous validation options: + transpile: undefined, // requires ajv-async package + // advanced options: + meta: true, + validateSchema: true, + addUsedSchema: true, + inlineRefs: true, + passContext: false, + loopRequired: Infinity, + ownProperties: false, + multipleOfPrecision: false, + errorDataPath: 'object', // deprecated + messages: true, + sourceCode: false, + processCode: undefined, // function (str: string): string {} + cache: new Cache, + serialize: undefined +} +``` + +##### Validation and reporting options + +- _$data_: support [$data references](#data-reference). Draft 6 meta-schema that is added by default will be extended to allow them. If you want to use another meta-schema you need to use $dataMetaSchema method to add support for $data reference. See [API](#api). +- _allErrors_: check all rules collecting all errors. Default is to return after the first error. +- _verbose_: include the reference to the part of the schema (`schema` and `parentSchema`) and validated data in errors (false by default). +- _$comment_ (NEW in Ajv version 6.0): log or pass the value of `$comment` keyword to a function. Option values: + - `false` (default): ignore $comment keyword. + - `true`: log the keyword value to console. 
+ - function: pass the keyword value, its schema path and root schema to the specified function +- _jsonPointers_: set `dataPath` property of errors using [JSON Pointers](https://tools.ietf.org/html/rfc6901) instead of JavaScript property access notation. +- _uniqueItems_: validate `uniqueItems` keyword (true by default). +- _unicode_: calculate correct length of strings with unicode pairs (true by default). Pass `false` to use `.length` of strings that is faster, but gives "incorrect" lengths of strings with unicode pairs - each unicode pair is counted as two characters. +- _nullable_: support keyword "nullable" from [Open API 3 specification](https://swagger.io/docs/specification/data-models/data-types/). +- _format_: formats validation mode. Option values: + - `"fast"` (default) - simplified and fast validation (see [Formats](#formats) for details of which formats are available and affected by this option). + - `"full"` - more restrictive and slow validation. E.g., 25:00:00 and 2015/14/33 will be invalid time and date in 'full' mode but it will be valid in 'fast' mode. + - `false` - ignore all format keywords. +- _formats_: an object with custom formats. Keys and values will be passed to `addFormat` method. +- _unknownFormats_: handling of unknown formats. Option values: + - `true` (default) - if an unknown format is encountered the exception is thrown during schema compilation. If `format` keyword value is [$data reference](#data-reference) and it is unknown the validation will fail. + - `[String]` - an array of unknown format names that will be ignored. This option can be used to allow usage of third party schemas with format(s) for which you don't have definitions, but still fail if another unknown format is used. If `format` keyword value is [$data reference](#data-reference) and it is not in this array the validation will fail. 
+ - `"ignore"` - to log warning during schema compilation and always pass validation (the default behaviour in versions before 5.0.0). This option is not recommended, as it allows to mistype format name and it won't be validated without any error message. This behaviour is required by JSON Schema specification. +- _schemas_: an array or object of schemas that will be added to the instance. In case you pass the array the schemas must have IDs in them. When the object is passed the method `addSchema(value, key)` will be called for each schema in this object. +- _logger_: sets the logging method. Default is the global `console` object that should have methods `log`, `warn` and `error`. Option values: + - custom logger - it should have methods `log`, `warn` and `error`. If any of these methods is missing an exception will be thrown. + - `false` - logging is disabled. + + +##### Referenced schema options + +- _schemaId_: this option defines which keywords are used as schema URI. Option value: + - `"$id"` (default) - only use `$id` keyword as schema URI (as specified in JSON Schema draft-06/07), ignore `id` keyword (if it is present a warning will be logged). + - `"id"` - only use `id` keyword as schema URI (as specified in JSON Schema draft-04), ignore `$id` keyword (if it is present a warning will be logged). + - `"auto"` - use both `$id` and `id` keywords as schema URI. If both are present (in the same schema object) and different the exception will be thrown during schema compilation. +- _missingRefs_: handling of missing referenced schemas. Option values: + - `true` (default) - if the reference cannot be resolved during compilation the exception is thrown. The thrown error has properties `missingRef` (with hash fragment) and `missingSchema` (without it). Both properties are resolved relative to the current base id (usually schema id, unless it was substituted). + - `"ignore"` - to log error during compilation and always pass validation. 
+ - `"fail"` - to log error and successfully compile schema but fail validation if this rule is checked. +- _extendRefs_: validation of other keywords when `$ref` is present in the schema. Option values: + - `"ignore"` (default) - when `$ref` is used other keywords are ignored (as per [JSON Reference](https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03#section-3) standard). A warning will be logged during the schema compilation. + - `"fail"` (recommended) - if other validation keywords are used together with `$ref` the exception will be thrown when the schema is compiled. This option is recommended to make sure schema has no keywords that are ignored, which can be confusing. + - `true` - validate all keywords in the schemas with `$ref` (the default behaviour in versions before 5.0.0). +- _loadSchema_: asynchronous function that will be used to load remote schemas when `compileAsync` [method](#api-compileAsync) is used and some reference is missing (option `missingRefs` should NOT be 'fail' or 'ignore'). This function should accept remote schema uri as a parameter and return a Promise that resolves to a schema. See example in [Asynchronous compilation](#asynchronous-schema-compilation). + + +##### Options to modify validated data + +- _removeAdditional_: remove additional properties - see example in [Filtering data](#filtering-data). This option is not used if schema is added with `addMetaSchema` method. Option values: + - `false` (default) - not to remove additional properties + - `"all"` - all additional properties are removed, regardless of `additionalProperties` keyword in schema (and no validation is made for them). + - `true` - only additional properties with `additionalProperties` keyword equal to `false` are removed. + - `"failing"` - additional properties that fail schema validation will be removed (where `additionalProperties` keyword is `false` or schema). 
+- _useDefaults_: replace missing or undefined properties and items with the values from corresponding `default` keywords. Default behaviour is to ignore `default` keywords. This option is not used if schema is added with `addMetaSchema` method. See examples in [Assigning defaults](#assigning-defaults). Option values: + - `false` (default) - do not use defaults + - `true` - insert defaults by value (object literal is used). + - `"empty"` - in addition to missing or undefined, use defaults for properties and items that are equal to `null` or `""` (an empty string). + - `"shared"` (deprecated) - insert defaults by reference. If the default is an object, it will be shared by all instances of validated data. If you modify the inserted default in the validated data, it will be modified in the schema as well. +- _coerceTypes_: change data type of data to match `type` keyword. See the example in [Coercing data types](#coercing-data-types) and [coercion rules](https://github.com/epoberezkin/ajv/blob/master/COERCION.md). Option values: + - `false` (default) - no type coercion. + - `true` - coerce scalar data types. + - `"array"` - in addition to coercions between scalar types, coerce scalar data to an array with one element and vice versa (as required by the schema). + + +##### Strict mode options + +- _strictDefaults_: report ignored `default` keywords in schemas. Option values: + - `false` (default) - ignored defaults are not reported + - `true` - if an ignored default is present, throw an error + - `"log"` - if an ignored default is present, log warning +- _strictKeywords_: report unknown keywords in schemas. Option values: + - `false` (default) - unknown keywords are not reported + - `true` - if an unknown keyword is present, throw an error + - `"log"` - if an unknown keyword is present, log warning + + +##### Asynchronous validation options + +- _transpile_: Requires [ajv-async](https://github.com/epoberezkin/ajv-async) package. 
It determines whether Ajv transpiles compiled asynchronous validation function. Option values: + - `undefined` (default) - transpile with [nodent](https://github.com/MatAtBread/nodent) if async functions are not supported. + - `true` - always transpile with nodent. + - `false` - do not transpile; if async functions are not supported an exception will be thrown. + + +##### Advanced options + +- _meta_: add [meta-schema](http://json-schema.org/documentation.html) so it can be used by other schemas (true by default). If an object is passed, it will be used as the default meta-schema for schemas that have no `$schema` keyword. This default meta-schema MUST have `$schema` keyword. +- _validateSchema_: validate added/compiled schemas against meta-schema (true by default). `$schema` property in the schema can be http://json-schema.org/draft-07/schema or absent (draft-07 meta-schema will be used) or can be a reference to the schema previously added with `addMetaSchema` method. Option values: + - `true` (default) - if the validation fails, throw the exception. + - `"log"` - if the validation fails, log error. + - `false` - skip schema validation. +- _addUsedSchema_: by default methods `compile` and `validate` add schemas to the instance if they have `$id` (or `id`) property that doesn't start with "#". If `$id` is present and it is not unique the exception will be thrown. Set this option to `false` to skip adding schemas to the instance and the `$id` uniqueness check when these methods are used. This option does not affect `addSchema` method. +- _inlineRefs_: Affects compilation of referenced schemas. Option values: + - `true` (default) - the referenced schemas that don't have refs in them are inlined, regardless of their size - that substantially improves performance at the cost of the bigger size of compiled schema functions. + - `false` - to not inline referenced schemas (they will be compiled as separate functions). 
+ - integer number - to limit the maximum number of keywords of the schema that will be inlined. +- _passContext_: pass validation context to custom keyword functions. If this option is `true` and you pass some context to the compiled validation function with `validate.call(context, data)`, the `context` will be available as `this` in your custom keywords. By default `this` is Ajv instance. +- _loopRequired_: by default `required` keyword is compiled into a single expression (or a sequence of statements in `allErrors` mode). In case of a very large number of properties in this keyword it may result in a very big validation function. Pass integer to set the number of properties above which `required` keyword will be validated in a loop - smaller validation function size but also worse performance. +- _ownProperties_: by default Ajv iterates over all enumerable object properties; when this option is `true` only own enumerable object properties (i.e. found directly on the object rather than on its prototype) are iterated. Contributed by @mbroadst. +- _multipleOfPrecision_: by default `multipleOf` keyword is validated by comparing the result of division with parseInt() of that result. It works for dividers that are bigger than 1. For small dividers such as 0.01 the result of the division is usually not integer (even when it should be integer, see issue [#84](https://github.com/epoberezkin/ajv/issues/84)). If you need to use fractional dividers set this option to some positive integer N to have `multipleOf` validated using this formula: `Math.abs(Math.round(division) - division) < 1e-N` (it is slower but allows for float arithmetics deviations). +- _errorDataPath_ (deprecated): set `dataPath` to point to 'object' (default) or to 'property' when validating keywords `required`, `additionalProperties` and `dependencies`. +- _messages_: Include human-readable messages in errors. `true` by default. `false` can be passed when custom messages are used (e.g. 
with [ajv-i18n](https://github.com/epoberezkin/ajv-i18n)). +- _sourceCode_: add `sourceCode` property to validating function (for debugging; this code can be different from the result of toString call). +- _processCode_: an optional function to process generated code before it is passed to Function constructor. It can be used to either beautify (the validating function is generated without line-breaks) or to transpile code. Starting from version 5.0.0 this option replaced options: + - `beautify` that formatted the generated function using [js-beautify](https://github.com/beautify-web/js-beautify). If you want to beautify the generated code pass `require('js-beautify').js_beautify`. + - `transpile` that transpiled asynchronous validation function. You can still use `transpile` option with [ajv-async](https://github.com/epoberezkin/ajv-async) package. See [Asynchronous validation](#asynchronous-validation) for more information. +- _cache_: an optional instance of cache to store compiled schemas using stable-stringified schema as a key. For example, set-associative cache [sacjs](https://github.com/epoberezkin/sacjs) can be used. If not passed then a simple hash is used which is good enough for the common use case (a limited number of statically defined schemas). Cache should have methods `put(key, value)`, `get(key)`, `del(key)` and `clear()`. +- _serialize_: an optional function to serialize schema to cache key. Pass `false` to use schema itself as a key (e.g., if WeakMap used as a cache). By default [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) is used. + + +## Validation errors + +In case of validation failure, Ajv assigns the array of errors to `errors` property of validation function (or to `errors` property of Ajv instance when `validate` or `validateSchema` methods were called). 
In case of [asynchronous validation](#asynchronous-validation), the returned promise is rejected with exception `Ajv.ValidationError` that has `errors` property. + + +### Error objects + +Each error is an object with the following properties: + +- _keyword_: validation keyword. +- _dataPath_: the path to the part of the data that was validated. By default `dataPath` uses JavaScript property access notation (e.g., `".prop[1].subProp"`). When the option `jsonPointers` is true (see [Options](#options)) `dataPath` will be set using JSON pointer standard (e.g., `"/prop/1/subProp"`). +- _schemaPath_: the path (JSON-pointer as a URI fragment) to the schema of the keyword that failed validation. +- _params_: the object with the additional information about error that can be used to create custom error messages (e.g., using [ajv-i18n](https://github.com/epoberezkin/ajv-i18n) package). See below for parameters set by all keywords. +- _message_: the standard error message (can be excluded with option `messages` set to false). +- _schema_: the schema of the keyword (added with `verbose` option). +- _parentSchema_: the schema containing the keyword (added with `verbose` option) +- _data_: the data validated by the keyword (added with `verbose` option). + +__Please note__: `propertyNames` keyword schema validation errors have an additional property `propertyName`, `dataPath` points to the object. After schema validation for each property name, if it is invalid an additional error is added with the property `keyword` equal to `"propertyNames"`. + + +### Error parameters + +Properties of `params` object in errors depend on the keyword that failed validation. + +- `maxItems`, `minItems`, `maxLength`, `minLength`, `maxProperties`, `minProperties` - property `limit` (number, the schema of the keyword). +- `additionalItems` - property `limit` (the maximum number of allowed items in case when `items` keyword is an array of schemas and `additionalItems` is false). 
+- `additionalProperties` - property `additionalProperty` (the property not used in `properties` and `patternProperties` keywords). +- `dependencies` - properties: + - `property` (dependent property), + - `missingProperty` (required missing dependency - only the first one is reported currently) + - `deps` (required dependencies, comma separated list as a string), + - `depsCount` (the number of required dependencies). +- `format` - property `format` (the schema of the keyword). +- `maximum`, `minimum` - properties: + - `limit` (number, the schema of the keyword), + - `exclusive` (boolean, the schema of `exclusiveMaximum` or `exclusiveMinimum`), + - `comparison` (string, comparison operation to compare the data to the limit, with the data on the left and the limit on the right; can be "<", "<=", ">", ">=") +- `multipleOf` - property `multipleOf` (the schema of the keyword) +- `pattern` - property `pattern` (the schema of the keyword) +- `required` - property `missingProperty` (required property that is missing). +- `propertyNames` - property `propertyName` (an invalid property name). +- `patternRequired` (in ajv-keywords) - property `missingPattern` (required pattern that did not match any property). +- `type` - property `type` (required type(s), a string, can be a comma-separated list) +- `uniqueItems` - properties `i` and `j` (indices of duplicate items). +- `const` - property `allowedValue` pointing to the value (the schema of the keyword). +- `enum` - property `allowedValues` pointing to the array of values (the schema of the keyword). +- `$ref` - property `ref` with the referenced schema URI. +- `oneOf` - property `passingSchemas` (array of indices of passing schemas, null if no schema passes). +- custom keywords (in case keyword definition doesn't create errors) - property `keyword` (the keyword name). + + +## Plugins + +Ajv can be extended with plugins that add custom keywords, formats or functions to process generated code. 
When such plugin is published as npm package it is recommended that it follows these conventions: + +- it exports a function +- this function accepts ajv instance as the first parameter and returns the same instance to allow chaining +- this function can accept an optional configuration as the second parameter + +If you have published a useful plugin please submit a PR to add it to the next section. + + +## Related packages + +- [ajv-async](https://github.com/epoberezkin/ajv-async) - plugin to configure async validation mode +- [ajv-bsontype](https://github.com/BoLaMN/ajv-bsontype) - plugin to validate mongodb's bsonType formats +- [ajv-cli](https://github.com/jessedc/ajv-cli) - command line interface +- [ajv-errors](https://github.com/epoberezkin/ajv-errors) - plugin for custom error messages +- [ajv-i18n](https://github.com/epoberezkin/ajv-i18n) - internationalised error messages +- [ajv-istanbul](https://github.com/epoberezkin/ajv-istanbul) - plugin to instrument generated validation code to measure test coverage of your schemas +- [ajv-keywords](https://github.com/epoberezkin/ajv-keywords) - plugin with custom validation keywords (select, typeof, etc.) +- [ajv-merge-patch](https://github.com/epoberezkin/ajv-merge-patch) - plugin with keywords $merge and $patch +- [ajv-pack](https://github.com/epoberezkin/ajv-pack) - produces a compact module exporting validation functions + + +## Some packages using Ajv + +- [webpack](https://github.com/webpack/webpack) - a module bundler. 
Its main purpose is to bundle JavaScript files for usage in a browser +- [jsonscript-js](https://github.com/JSONScript/jsonscript-js) - the interpreter for [JSONScript](http://www.jsonscript.org) - scripted processing of existing endpoints and services +- [osprey-method-handler](https://github.com/mulesoft-labs/osprey-method-handler) - Express middleware for validating requests and responses based on a RAML method object, used in [osprey](https://github.com/mulesoft/osprey) - validating API proxy generated from a RAML definition +- [har-validator](https://github.com/ahmadnassri/har-validator) - HTTP Archive (HAR) validator +- [jsoneditor](https://github.com/josdejong/jsoneditor) - a web-based tool to view, edit, format, and validate JSON http://jsoneditoronline.org +- [JSON Schema Lint](https://github.com/nickcmaynard/jsonschemalint) - a web tool to validate JSON/YAML document against a single JSON Schema http://jsonschemalint.com +- [objection](https://github.com/vincit/objection.js) - SQL-friendly ORM for Node.js +- [table](https://github.com/gajus/table) - formats data into a string table +- [ripple-lib](https://github.com/ripple/ripple-lib) - a JavaScript API for interacting with [Ripple](https://ripple.com) in Node.js and the browser +- [restbase](https://github.com/wikimedia/restbase) - distributed storage with REST API & dispatcher for backend services built to provide a low-latency & high-throughput API for Wikipedia / Wikimedia content +- [hippie-swagger](https://github.com/CacheControl/hippie-swagger) - [Hippie](https://github.com/vesln/hippie) wrapper that provides end to end API testing with swagger validation +- [react-form-controlled](https://github.com/seeden/react-form-controlled) - React controlled form components with validation +- [rabbitmq-schema](https://github.com/tjmehta/rabbitmq-schema) - a schema definition module for RabbitMQ graphs and messages +- [@query/schema](https://www.npmjs.com/package/@query/schema) - stream filtering with a 
URI-safe query syntax parsing to JSON Schema +- [chai-ajv-json-schema](https://github.com/peon374/chai-ajv-json-schema) - chai plugin to use JSON Schema with expect in mocha tests +- [grunt-jsonschema-ajv](https://github.com/SignpostMarv/grunt-jsonschema-ajv) - Grunt plugin for validating files against JSON Schema +- [extract-text-webpack-plugin](https://github.com/webpack-contrib/extract-text-webpack-plugin) - extract text from bundle into a file +- [electron-builder](https://github.com/electron-userland/electron-builder) - a solution to package and build a ready for distribution Electron app +- [addons-linter](https://github.com/mozilla/addons-linter) - Mozilla Add-ons Linter +- [gh-pages-generator](https://github.com/epoberezkin/gh-pages-generator) - multi-page site generator converting markdown files to GitHub pages +- [ESLint](https://github.com/eslint/eslint) - the pluggable linting utility for JavaScript and JSX + + +## Tests + +``` +npm install +git submodule update --init +npm test +``` + +## Contributing + +All validation functions are generated using doT templates in [dot](https://github.com/epoberezkin/ajv/tree/master/lib/dot) folder. Templates are precompiled so doT is not a run-time dependency. + +`npm run build` - compiles templates to [dotjs](https://github.com/epoberezkin/ajv/tree/master/lib/dotjs) folder. + +`npm run watch` - automatically compiles templates when files in dot folder change + +Please see [Contributing guidelines](https://github.com/epoberezkin/ajv/blob/master/CONTRIBUTING.md) + + +## Changes history + +See https://github.com/epoberezkin/ajv/releases + +__Please note__: [Changes in version 6.0.0](https://github.com/epoberezkin/ajv/releases/tag/v6.0.0). + +[Version 5.0.0](https://github.com/epoberezkin/ajv/releases/tag/5.0.0). + +[Version 4.0.0](https://github.com/epoberezkin/ajv/releases/tag/4.0.0). + +[Version 3.0.0](https://github.com/epoberezkin/ajv/releases/tag/3.0.0). 
+ +[Version 2.0.0](https://github.com/epoberezkin/ajv/releases/tag/2.0.0). + + +## License + +[MIT](https://github.com/epoberezkin/ajv/blob/master/LICENSE) diff --git a/src/node_modules/ajv/dist/ajv.bundle.js b/src/node_modules/ajv/dist/ajv.bundle.js new file mode 100644 index 0000000..286dc92 --- /dev/null +++ b/src/node_modules/ajv/dist/ajv.bundle.js @@ -0,0 +1,7165 @@ +(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.Ajv = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i%\\^`{|}]|%[0-9a-f]{2})|\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?)*\})*$/i; +// For the source: https://gist.github.com/dperini/729294 +// For test cases: https://mathiasbynens.be/demo/url-regex +// @todo Delete current URL in favour of the commented out URL rule when this issue is fixed https://github.com/eslint/eslint/issues/7983. 
+// var URL = /^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)(?:\.(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)*(?:\.(?:[a-z\u{00a1}-\u{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$/iu; +var URL = /^(?:(?:http[s\u017F]?|ftp):\/\/)(?:(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+(?::(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?@)?(?:(?!10(?:\.[0-9]{1,3}){3})(?!127(?:\.[0-9]{1,3}){3})(?!169\.254(?:\.[0-9]{1,3}){2})(?!192\.168(?:\.[0-9]{1,3}){2})(?!172\.(?:1[6-9]|2[0-9]|3[01])(?:\.[0-9]{1,3}){2})(?:[1-9][0-9]?|1[0-9][0-9]|2[01][0-9]|22[0-3])(?:\.(?:1?[0-9]{1,2}|2[0-4][0-9]|25[0-5])){2}(?:\.(?:[1-9][0-9]?|1[0-9][0-9]|2[0-4][0-9]|25[0-4]))|(?:(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)(?:\.(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)*(?:\.(?:(?:[KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]){2,})))(?::[0-9]{2,5})?(?:\
/(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?$/i; +var UUID = /^(?:urn:uuid:)?[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$/i; +var JSON_POINTER = /^(?:\/(?:[^~/]|~0|~1)*)*$/; +var JSON_POINTER_URI_FRAGMENT = /^#(?:\/(?:[a-z0-9_\-.!$&'()*+,;:=@]|%[0-9a-f]{2}|~0|~1)*)*$/i; +var RELATIVE_JSON_POINTER = /^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)$/; + + +module.exports = formats; + +function formats(mode) { + mode = mode == 'full' ? 'full' : 'fast'; + return util.copy(formats[mode]); +} + + +formats.fast = { + // date: http://tools.ietf.org/html/rfc3339#section-5.6 + date: /^\d\d\d\d-[0-1]\d-[0-3]\d$/, + // date-time: http://tools.ietf.org/html/rfc3339#section-5.6 + time: /^(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d:\d\d)?$/i, + 'date-time': /^\d\d\d\d-[0-1]\d-[0-3]\d[t\s](?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d:\d\d)$/i, + // uri: https://github.com/mafintosh/is-my-json-valid/blob/master/formats.js + uri: /^(?:[a-z][a-z0-9+-.]*:)(?:\/?\/)?[^\s]*$/i, + 'uri-reference': /^(?:(?:[a-z][a-z0-9+-.]*:)?\/?\/)?(?:[^\\\s#][^\s#]*)?(?:#[^\\\s]*)?$/i, + 'uri-template': URITEMPLATE, + url: URL, + // email (sources from jsen validator): + // http://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address#answer-8829363 + // http://www.w3.org/TR/html5/forms.html#valid-e-mail-address (search for 'willful violation') + email: /^[a-z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?)*$/i, + hostname: HOSTNAME, + // optimized https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9780596802837/ch07s16.html + ipv4: /^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/, + // optimized 
http://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses + ipv6: /^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i, + regex: regex, + // uuid: http://tools.ietf.org/html/rfc4122 + uuid: UUID, + // JSON-pointer: https://tools.ietf.org/html/rfc6901 + // uri fragment: https://tools.ietf.org/html/rfc3986#appendix-A + 'json-pointer': JSON_POINTER, + 'json-pointer-uri-fragment': JSON_POINTER_URI_FRAGMENT, + // relative JSON-pointer: http://tools.ietf.org/html/draft-luff-relative-json-pointer-00 + 'relative-json-pointer': RELATIVE_JSON_POINTER +}; + + +formats.full = { + date: date, + time: time, + 'date-time': date_time, + uri: uri, + 'uri-reference': URIREF, + 'uri-template': URITEMPLATE, + url: URL, + email: 
/^[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$/i, + hostname: hostname, + ipv4: /^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/, + ipv6: /^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i, + regex: regex, + uuid: UUID, + 'json-pointer': JSON_POINTER, + 'json-pointer-uri-fragment': JSON_POINTER_URI_FRAGMENT, + 'relative-json-pointer': RELATIVE_JSON_POINTER +}; + + +function isLeapYear(year) { + // https://tools.ietf.org/html/rfc3339#appendix-C + return year % 4 === 0 && (year % 100 !== 0 || year % 400 === 0); +} + + +function date(str) { + // full-date from http://tools.ietf.org/html/rfc3339#section-5.6 + var matches = str.match(DATE); + if (!matches) return false; + + var year = +matches[1]; + var month = +matches[2]; + var day = +matches[3]; + + 
return month >= 1 && month <= 12 && day >= 1 && + day <= (month == 2 && isLeapYear(year) ? 29 : DAYS[month]); +} + + +function time(str, full) { + var matches = str.match(TIME); + if (!matches) return false; + + var hour = matches[1]; + var minute = matches[2]; + var second = matches[3]; + var timeZone = matches[5]; + return ((hour <= 23 && minute <= 59 && second <= 59) || + (hour == 23 && minute == 59 && second == 60)) && + (!full || timeZone); +} + + +var DATE_TIME_SEPARATOR = /t|\s/i; +function date_time(str) { + // http://tools.ietf.org/html/rfc3339#section-5.6 + var dateTime = str.split(DATE_TIME_SEPARATOR); + return dateTime.length == 2 && date(dateTime[0]) && time(dateTime[1], true); +} + + +function hostname(str) { + // https://tools.ietf.org/html/rfc1034#section-3.5 + // https://tools.ietf.org/html/rfc1123#section-2 + return str.length <= 255 && HOSTNAME.test(str); +} + + +var NOT_URI_FRAGMENT = /\/|:/; +function uri(str) { + // http://jmrware.com/articles/2009/uri_regexp/URI_regex.html + optional protocol + required "." 
+ return NOT_URI_FRAGMENT.test(str) && URI.test(str); +} + + +var Z_ANCHOR = /[^\\]\\Z/; +function regex(str) { + if (Z_ANCHOR.test(str)) return false; + try { + new RegExp(str); + return true; + } catch(e) { + return false; + } +} + +},{"./util":10}],5:[function(require,module,exports){ +'use strict'; + +var resolve = require('./resolve') + , util = require('./util') + , errorClasses = require('./error_classes') + , stableStringify = require('fast-json-stable-stringify'); + +var validateGenerator = require('../dotjs/validate'); + +/** + * Functions below are used inside compiled validations function + */ + +var ucs2length = util.ucs2length; +var equal = require('fast-deep-equal'); + +// this error is thrown by async schemas to return validation errors via exception +var ValidationError = errorClasses.Validation; + +module.exports = compile; + + +/** + * Compiles schema to validation function + * @this Ajv + * @param {Object} schema schema object + * @param {Object} root object with information about the root schema for this schema + * @param {Object} localRefs the hash of local references inside the schema (created by resolve.id), used for inline resolution + * @param {String} baseId base ID for IDs in the schema + * @return {Function} validation function + */ +function compile(schema, root, localRefs, baseId) { + /* jshint validthis: true, evil: true */ + /* eslint no-shadow: 0 */ + var self = this + , opts = this._opts + , refVal = [ undefined ] + , refs = {} + , patterns = [] + , patternsHash = {} + , defaults = [] + , defaultsHash = {} + , customRules = []; + + root = root || { schema: schema, refVal: refVal, refs: refs }; + + var c = checkCompiling.call(this, schema, root, baseId); + var compilation = this._compilations[c.index]; + if (c.compiling) return (compilation.callValidate = callValidate); + + var formats = this._formats; + var RULES = this.RULES; + + try { + var v = localCompile(schema, root, localRefs, baseId); + compilation.validate = v; + var cv = 
compilation.callValidate; + if (cv) { + cv.schema = v.schema; + cv.errors = null; + cv.refs = v.refs; + cv.refVal = v.refVal; + cv.root = v.root; + cv.$async = v.$async; + if (opts.sourceCode) cv.source = v.source; + } + return v; + } finally { + endCompiling.call(this, schema, root, baseId); + } + + /* @this {*} - custom context, see passContext option */ + function callValidate() { + /* jshint validthis: true */ + var validate = compilation.validate; + var result = validate.apply(this, arguments); + callValidate.errors = validate.errors; + return result; + } + + function localCompile(_schema, _root, localRefs, baseId) { + var isRoot = !_root || (_root && _root.schema == _schema); + if (_root.schema != root.schema) + return compile.call(self, _schema, _root, localRefs, baseId); + + var $async = _schema.$async === true; + + var sourceCode = validateGenerator({ + isTop: true, + schema: _schema, + isRoot: isRoot, + baseId: baseId, + root: _root, + schemaPath: '', + errSchemaPath: '#', + errorPath: '""', + MissingRefError: errorClasses.MissingRef, + RULES: RULES, + validate: validateGenerator, + util: util, + resolve: resolve, + resolveRef: resolveRef, + usePattern: usePattern, + useDefault: useDefault, + useCustomRule: useCustomRule, + opts: opts, + formats: formats, + logger: self.logger, + self: self + }); + + sourceCode = vars(refVal, refValCode) + vars(patterns, patternCode) + + vars(defaults, defaultCode) + vars(customRules, customRuleCode) + + sourceCode; + + if (opts.processCode) sourceCode = opts.processCode(sourceCode); + // console.log('\n\n\n *** \n', JSON.stringify(sourceCode)); + var validate; + try { + var makeValidate = new Function( + 'self', + 'RULES', + 'formats', + 'root', + 'refVal', + 'defaults', + 'customRules', + 'equal', + 'ucs2length', + 'ValidationError', + sourceCode + ); + + validate = makeValidate( + self, + RULES, + formats, + root, + refVal, + defaults, + customRules, + equal, + ucs2length, + ValidationError + ); + + refVal[0] = 
validate; + } catch(e) { + self.logger.error('Error compiling schema, function code:', sourceCode); + throw e; + } + + validate.schema = _schema; + validate.errors = null; + validate.refs = refs; + validate.refVal = refVal; + validate.root = isRoot ? validate : _root; + if ($async) validate.$async = true; + if (opts.sourceCode === true) { + validate.source = { + code: sourceCode, + patterns: patterns, + defaults: defaults + }; + } + + return validate; + } + + function resolveRef(baseId, ref, isRoot) { + ref = resolve.url(baseId, ref); + var refIndex = refs[ref]; + var _refVal, refCode; + if (refIndex !== undefined) { + _refVal = refVal[refIndex]; + refCode = 'refVal[' + refIndex + ']'; + return resolvedRef(_refVal, refCode); + } + if (!isRoot && root.refs) { + var rootRefId = root.refs[ref]; + if (rootRefId !== undefined) { + _refVal = root.refVal[rootRefId]; + refCode = addLocalRef(ref, _refVal); + return resolvedRef(_refVal, refCode); + } + } + + refCode = addLocalRef(ref); + var v = resolve.call(self, localCompile, root, ref); + if (v === undefined) { + var localSchema = localRefs && localRefs[ref]; + if (localSchema) { + v = resolve.inlineRef(localSchema, opts.inlineRefs) + ? localSchema + : compile.call(self, localSchema, root, localRefs, baseId); + } + } + + if (v === undefined) { + removeLocalRef(ref); + } else { + replaceLocalRef(ref, v); + return resolvedRef(v, refCode); + } + } + + function addLocalRef(ref, v) { + var refId = refVal.length; + refVal[refId] = v; + refs[ref] = refId; + return 'refVal' + refId; + } + + function removeLocalRef(ref) { + delete refs[ref]; + } + + function replaceLocalRef(ref, v) { + var refId = refs[ref]; + refVal[refId] = v; + } + + function resolvedRef(refVal, code) { + return typeof refVal == 'object' || typeof refVal == 'boolean' + ? 
{ code: code, schema: refVal, inline: true } + : { code: code, $async: refVal && !!refVal.$async }; + } + + function usePattern(regexStr) { + var index = patternsHash[regexStr]; + if (index === undefined) { + index = patternsHash[regexStr] = patterns.length; + patterns[index] = regexStr; + } + return 'pattern' + index; + } + + function useDefault(value) { + switch (typeof value) { + case 'boolean': + case 'number': + return '' + value; + case 'string': + return util.toQuotedString(value); + case 'object': + if (value === null) return 'null'; + var valueStr = stableStringify(value); + var index = defaultsHash[valueStr]; + if (index === undefined) { + index = defaultsHash[valueStr] = defaults.length; + defaults[index] = value; + } + return 'default' + index; + } + } + + function useCustomRule(rule, schema, parentSchema, it) { + if (self._opts.validateSchema !== false) { + var deps = rule.definition.dependencies; + if (deps && !deps.every(function(keyword) { + return Object.prototype.hasOwnProperty.call(parentSchema, keyword); + })) + throw new Error('parent schema must have all required keywords: ' + deps.join(',')); + + var validateSchema = rule.definition.validateSchema; + if (validateSchema) { + var valid = validateSchema(schema); + if (!valid) { + var message = 'keyword schema is invalid: ' + self.errorsText(validateSchema.errors); + if (self._opts.validateSchema == 'log') self.logger.error(message); + else throw new Error(message); + } + } + } + + var compile = rule.definition.compile + , inline = rule.definition.inline + , macro = rule.definition.macro; + + var validate; + if (compile) { + validate = compile.call(self, schema, parentSchema, it); + } else if (macro) { + validate = macro.call(self, schema, parentSchema, it); + if (opts.validateSchema !== false) self.validateSchema(validate, true); + } else if (inline) { + validate = inline.call(self, it, rule.keyword, schema, parentSchema); + } else { + validate = rule.definition.validate; + if (!validate) 
return; + } + + if (validate === undefined) + throw new Error('custom keyword "' + rule.keyword + '"failed to compile'); + + var index = customRules.length; + customRules[index] = validate; + + return { + code: 'customRule' + index, + validate: validate + }; + } +} + + +/** + * Checks if the schema is currently compiled + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + * @return {Object} object with properties "index" (compilation index) and "compiling" (boolean) + */ +function checkCompiling(schema, root, baseId) { + /* jshint validthis: true */ + var index = compIndex.call(this, schema, root, baseId); + if (index >= 0) return { index: index, compiling: true }; + index = this._compilations.length; + this._compilations[index] = { + schema: schema, + root: root, + baseId: baseId + }; + return { index: index, compiling: false }; +} + + +/** + * Removes the schema from the currently compiled list + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + */ +function endCompiling(schema, root, baseId) { + /* jshint validthis: true */ + var i = compIndex.call(this, schema, root, baseId); + if (i >= 0) this._compilations.splice(i, 1); +} + + +/** + * Index of schema compilation in the currently compiled list + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + * @return {Integer} compilation index + */ +function compIndex(schema, root, baseId) { + /* jshint validthis: true */ + for (var i=0; i= 0xD800 && value <= 0xDBFF && pos < len) { + // high surrogate, and there is a next character + value = str.charCodeAt(pos); + if ((value & 0xFC00) == 0xDC00) pos++; // low surrogate + } + } + return length; +}; + +},{}],10:[function(require,module,exports){ +'use strict'; + + +module.exports = { + copy: copy, + checkDataType: 
checkDataType, + checkDataTypes: checkDataTypes, + coerceToTypes: coerceToTypes, + toHash: toHash, + getProperty: getProperty, + escapeQuotes: escapeQuotes, + equal: require('fast-deep-equal'), + ucs2length: require('./ucs2length'), + varOccurences: varOccurences, + varReplace: varReplace, + cleanUpCode: cleanUpCode, + finalCleanUpCode: finalCleanUpCode, + schemaHasRules: schemaHasRules, + schemaHasRulesExcept: schemaHasRulesExcept, + schemaUnknownRules: schemaUnknownRules, + toQuotedString: toQuotedString, + getPathExpr: getPathExpr, + getPath: getPath, + getData: getData, + unescapeFragment: unescapeFragment, + unescapeJsonPointer: unescapeJsonPointer, + escapeFragment: escapeFragment, + escapeJsonPointer: escapeJsonPointer +}; + + +function copy(o, to) { + to = to || {}; + for (var key in o) to[key] = o[key]; + return to; +} + + +function checkDataType(dataType, data, negate) { + var EQUAL = negate ? ' !== ' : ' === ' + , AND = negate ? ' || ' : ' && ' + , OK = negate ? '!' : '' + , NOT = negate ? '' : '!'; + switch (dataType) { + case 'null': return data + EQUAL + 'null'; + case 'array': return OK + 'Array.isArray(' + data + ')'; + case 'object': return '(' + OK + data + AND + + 'typeof ' + data + EQUAL + '"object"' + AND + + NOT + 'Array.isArray(' + data + '))'; + case 'integer': return '(typeof ' + data + EQUAL + '"number"' + AND + + NOT + '(' + data + ' % 1)' + + AND + data + EQUAL + data + ')'; + default: return 'typeof ' + data + EQUAL + '"' + dataType + '"'; + } +} + + +function checkDataTypes(dataTypes, data) { + switch (dataTypes.length) { + case 1: return checkDataType(dataTypes[0], data, true); + default: + var code = ''; + var types = toHash(dataTypes); + if (types.array && types.object) { + code = types.null ? '(': '(!' + data + ' || '; + code += 'typeof ' + data + ' !== "object")'; + delete types.null; + delete types.array; + delete types.object; + } + if (types.number) delete types.integer; + for (var t in types) + code += (code ? 
' && ' : '' ) + checkDataType(t, data, true); + + return code; + } +} + + +var COERCE_TO_TYPES = toHash([ 'string', 'number', 'integer', 'boolean', 'null' ]); +function coerceToTypes(optionCoerceTypes, dataTypes) { + if (Array.isArray(dataTypes)) { + var types = []; + for (var i=0; i= lvl) throw new Error('Cannot access property/index ' + up + ' levels up, current level is ' + lvl); + return paths[lvl - up]; + } + + if (up > lvl) throw new Error('Cannot access data ' + up + ' levels up, current level is ' + lvl); + data = 'data' + ((lvl - up) || ''); + if (!jsonPointer) return data; + } + + var expr = data; + var segments = jsonPointer.split('/'); + for (var i=0; i', + $notOp = $isMax ? '>' : '<', + $errorKeyword = undefined; + if ($isDataExcl) { + var $schemaValueExcl = it.util.getData($schemaExcl.$data, $dataLvl, it.dataPathArr), + $exclusive = 'exclusive' + $lvl, + $exclType = 'exclType' + $lvl, + $exclIsNumber = 'exclIsNumber' + $lvl, + $opExpr = 'op' + $lvl, + $opStr = '\' + ' + $opExpr + ' + \''; + out += ' var schemaExcl' + ($lvl) + ' = ' + ($schemaValueExcl) + '; '; + $schemaValueExcl = 'schemaExcl' + $lvl; + out += ' var ' + ($exclusive) + '; var ' + ($exclType) + ' = typeof ' + ($schemaValueExcl) + '; if (' + ($exclType) + ' != \'boolean\' && ' + ($exclType) + ' != \'undefined\' && ' + ($exclType) + ' != \'number\') { '; + var $errorKeyword = $exclusiveKeyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_exclusiveLimit') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'' + ($exclusiveKeyword) + ' should be boolean\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + 
($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($exclType) + ' == \'number\' ? ( (' + ($exclusive) + ' = ' + ($schemaValue) + ' === undefined || ' + ($schemaValueExcl) + ' ' + ($op) + '= ' + ($schemaValue) + ') ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaValueExcl) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) : ( (' + ($exclusive) + ' = ' + ($schemaValueExcl) + ' === true) ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaValue) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) || ' + ($data) + ' !== ' + ($data) + ') { var op' + ($lvl) + ' = ' + ($exclusive) + ' ? \'' + ($op) + '\' : \'' + ($op) + '=\'; '; + if ($schema === undefined) { + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaValueExcl; + $isData = $isDataExcl; + } + } else { + var $exclIsNumber = typeof $schemaExcl == 'number', + $opStr = $op; + if ($exclIsNumber && $isData) { + var $opExpr = '\'' + $opStr + '\''; + out += ' if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ( ' + ($schemaValue) + ' === undefined || ' + ($schemaExcl) + ' ' + ($op) + '= ' + ($schemaValue) + ' ? 
' + ($data) + ' ' + ($notOp) + '= ' + ($schemaExcl) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) || ' + ($data) + ' !== ' + ($data) + ') { '; + } else { + if ($exclIsNumber && $schema === undefined) { + $exclusive = true; + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaExcl; + $notOp += '='; + } else { + if ($exclIsNumber) $schemaValue = Math[$isMax ? 'min' : 'max']($schemaExcl, $schema); + if ($schemaExcl === ($exclIsNumber ? $schemaValue : true)) { + $exclusive = true; + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $notOp += '='; + } else { + $exclusive = false; + $opStr += '='; + } + } + var $opExpr = '\'' + $opStr + '\''; + out += ' if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' || ' + ($data) + ' !== ' + ($data) + ') { '; + } + } + $errorKeyword = $errorKeyword || $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limit') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { comparison: ' + ($opExpr) + ', limit: ' + ($schemaValue) + ', exclusive: ' + ($exclusive) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be ' + ($opStr) + ' '; + if ($isData) { + out += '\' + ' + ($schemaValue); + } else { + out += '' + ($schemaValue) + '\''; + } + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += 
' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],13:[function(require,module,exports){ +'use strict'; +module.exports = function generate__limitItems(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $op = $keyword == 'maxItems' ? 
'>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($data) + '.length ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have '; + if ($keyword == 'maxItems') { + out += 'more'; + } else { + out += 'fewer'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' items\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],14:[function(require,module,exports){ +'use strict'; +module.exports = function generate__limitLength(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + 
it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $op = $keyword == 'maxLength' ? '>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + if (it.opts.unicode === false) { + out += ' ' + ($data) + '.length '; + } else { + out += ' ucs2length(' + ($data) + ') '; + } + out += ' ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitLength') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be '; + if ($keyword == 'maxLength') { + out += 'longer'; + } else { + out += 'shorter'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' characters\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' 
throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],15:[function(require,module,exports){ +'use strict'; +module.exports = function generate__limitProperties(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $op = $keyword == 'maxProperties' ? 
'>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' Object.keys(' + ($data) + ').length ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitProperties') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have '; + if ($keyword == 'maxProperties') { + out += 'more'; + } else { + out += 'fewer'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' properties\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],16:[function(require,module,exports){ +'use strict'; +module.exports = function generate_allOf(it, $keyword, $ruleType) { + var out = ' '; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + 
var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $currentBaseId = $it.baseId, + $allSchemasEmpty = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + $allSchemasEmpty = false; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if ($breakOnError) { + if ($allSchemasEmpty) { + out += ' if (true) { '; + } else { + out += ' ' + ($closingBraces.slice(0, -1)) + ' '; + } + } + out = it.util.cleanUpCode(out); + return out; +} + +},{}],17:[function(require,module,exports){ +'use strict'; +module.exports = function generate_anyOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $noEmptySchema = $schema.every(function($sch) { + return it.util.schemaHasRules($sch, it.RULES.all); + }); + if ($noEmptySchema) { + var $currentBaseId = $it.baseId; + out += ' var ' + ($errs) + ' = errors; var ' + ($valid) + ' = false; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = 
arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($valid) + ' || ' + ($nextValid) + '; if (!' + ($valid) + ') { '; + $closingBraces += '}'; + } + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($closingBraces) + ' if (!' + ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('anyOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should match some schema in anyOf\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += ' } else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + if (it.opts.allErrors) { + out += ' } '; + } + out = it.util.cleanUpCode(out); + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} + +},{}],18:[function(require,module,exports){ +'use strict'; +module.exports = function generate_comment(it, $keyword, $ruleType) { + var out = ' '; + var $schema = it.schema[$keyword]; + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $comment = 
it.util.toQuotedString($schema); + if (it.opts.$comment === true) { + out += ' console.log(' + ($comment) + ');'; + } else if (typeof it.opts.$comment == 'function') { + out += ' self._opts.$comment(' + ($comment) + ', ' + (it.util.toQuotedString($errSchemaPath)) + ', validate.root.schema);'; + } + return out; +} + +},{}],19:[function(require,module,exports){ +'use strict'; +module.exports = function generate_const(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!$isData) { + out += ' var schema' + ($lvl) + ' = validate.schema' + ($schemaPath) + ';'; + } + out += 'var ' + ($valid) + ' = equal(' + ($data) + ', schema' + ($lvl) + '); if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('const') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { allowedValue: schema' + ($lvl) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be equal to constant\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' }'; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],20:[function(require,module,exports){ +'use strict'; +module.exports = function generate_contains(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $idx = 'i' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $currentBaseId = it.baseId, + $nonEmptySchema = it.util.schemaHasRules($schema, it.RULES.all); + out 
+= 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if ($nonEmptySchema) { + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($nextValid) + ' = false; for (var ' + ($idx) + ' = 0; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' if (' + ($nextValid) + ') break; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($closingBraces) + ' if (!' + ($nextValid) + ') {'; + } else { + out += ' if (' + ($data) + '.length == 0) {'; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('contains') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should contain a valid item\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { 
+ out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + if ($nonEmptySchema) { + out += ' errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + } + if (it.opts.allErrors) { + out += ' } '; + } + out = it.util.cleanUpCode(out); + return out; +} + +},{}],21:[function(require,module,exports){ +'use strict'; +module.exports = function generate_custom(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $rule = this, + $definition = 'definition' + $lvl, + $rDef = $rule.definition, + $closingBraces = ''; + var $compile, $inline, $macro, $ruleValidate, $validateCode; + if ($isData && $rDef.$data) { + $validateCode = 'keywordValidate' + $lvl; + var $validateSchema = $rDef.validateSchema; + out += ' var ' + ($definition) + ' = RULES.custom[\'' + ($keyword) + '\'].definition; var ' + ($validateCode) + ' = ' + ($definition) + '.validate;'; + } else { + $ruleValidate = it.useCustomRule($rule, $schema, it.schema, it); + if (!$ruleValidate) return; + $schemaValue = 'validate.schema' + $schemaPath; + $validateCode = $ruleValidate.code; + $compile = $rDef.compile; + $inline = $rDef.inline; + $macro = $rDef.macro; + } + var $ruleErrs = $validateCode + '.errors', + $i = 'i' + 
$lvl, + $ruleErr = 'ruleErr' + $lvl, + $asyncKeyword = $rDef.async; + if ($asyncKeyword && !it.async) throw new Error('async keyword in sync schema'); + if (!($inline || $macro)) { + out += '' + ($ruleErrs) + ' = null;'; + } + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if ($isData && $rDef.$data) { + $closingBraces += '}'; + out += ' if (' + ($schemaValue) + ' === undefined) { ' + ($valid) + ' = true; } else { '; + if ($validateSchema) { + $closingBraces += '}'; + out += ' ' + ($valid) + ' = ' + ($definition) + '.validateSchema(' + ($schemaValue) + '); if (' + ($valid) + ') { '; + } + } + if ($inline) { + if ($rDef.statements) { + out += ' ' + ($ruleValidate.validate) + ' '; + } else { + out += ' ' + ($valid) + ' = ' + ($ruleValidate.validate) + '; '; + } + } else if ($macro) { + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + $it.schema = $ruleValidate.validate; + $it.schemaPath = ''; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var $code = it.validate($it).replace(/validate\.schema/g, $validateCode); + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($code); + } else { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + out += ' ' + ($validateCode) + '.call( '; + if (it.opts.passContext) { + out += 'this'; + } else { + out += 'self'; + } + if ($compile || $rDef.schema === false) { + out += ' , ' + ($data) + ' '; + } else { + out += ' , ' + ($schemaValue) + ' , ' + ($data) + ' , validate.schema' + (it.schemaPath) + ' '; + } + out += ' , (dataPath || \'\')'; + if (it.errorPath != '""') { + out += ' + ' + (it.errorPath); + } + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? 
it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' , ' + ($parentData) + ' , ' + ($parentDataProperty) + ' , rootData ) '; + var def_callRuleValidate = out; + out = $$outStack.pop(); + if ($rDef.errors === false) { + out += ' ' + ($valid) + ' = '; + if ($asyncKeyword) { + out += 'await '; + } + out += '' + (def_callRuleValidate) + '; '; + } else { + if ($asyncKeyword) { + $ruleErrs = 'customErrors' + $lvl; + out += ' var ' + ($ruleErrs) + ' = null; try { ' + ($valid) + ' = await ' + (def_callRuleValidate) + '; } catch (e) { ' + ($valid) + ' = false; if (e instanceof ValidationError) ' + ($ruleErrs) + ' = e.errors; else throw e; } '; + } else { + out += ' ' + ($ruleErrs) + ' = null; ' + ($valid) + ' = ' + (def_callRuleValidate) + '; '; + } + } + } + if ($rDef.modifying) { + out += ' if (' + ($parentData) + ') ' + ($data) + ' = ' + ($parentData) + '[' + ($parentDataProperty) + '];'; + } + out += '' + ($closingBraces); + if ($rDef.valid) { + if ($breakOnError) { + out += ' if (true) { '; + } + } else { + out += ' if ( '; + if ($rDef.valid === undefined) { + out += ' !'; + if ($macro) { + out += '' + ($nextValid); + } else { + out += '' + ($valid); + } + } else { + out += ' ' + (!$rDef.valid) + ' '; + } + out += ') { '; + $errorKeyword = $rule.keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'custom') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { keyword: \'' + ($rule.keyword) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should pass "' + ($rule.keyword) + '" keyword validation\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + 
($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + var def_customError = out; + out = $$outStack.pop(); + if ($inline) { + if ($rDef.errors) { + if ($rDef.errors != 'full') { + out += ' for (var ' + ($i) + '=' + ($errs) + '; ' + ($i) + '= 0) { + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } else { + throw new Error('unknown format "' + $schema + '" is used in schema at path "' + it.errSchemaPath + '"'); + } + } + var $isObject = typeof $format == 'object' && !($format instanceof RegExp) && $format.validate; + var $formatType = $isObject && $format.type || 'string'; + if ($isObject) { + var $async = $format.async === true; + $format = $format.validate; + } + if ($formatType != $ruleType) { + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } + if ($async) { + if (!it.async) throw new Error('async format in sync schema'); + var $formatRef = 'formats' + it.util.getProperty($schema) + '.validate'; + out += ' if (!(await ' + ($formatRef) + '(' + ($data) + '))) { '; + } else { + out += ' if (! 
'; + var $formatRef = 'formats' + it.util.getProperty($schema); + if ($isObject) $formatRef += '.validate'; + if (typeof $format == 'function') { + out += ' ' + ($formatRef) + '(' + ($data) + ') '; + } else { + out += ' ' + ($formatRef) + '.test(' + ($data) + ') '; + } + out += ') { '; + } + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('format') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { format: '; + if ($isData) { + out += '' + ($schemaValue); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match format "'; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + (it.util.escapeQuotes($schema)); + } + out += '"\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],25:[function(require,module,exports){ +'use strict'; +module.exports = function generate_if(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = 
it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + var $thenSch = it.schema['then'], + $elseSch = it.schema['else'], + $thenPresent = $thenSch !== undefined && it.util.schemaHasRules($thenSch, it.RULES.all), + $elsePresent = $elseSch !== undefined && it.util.schemaHasRules($elseSch, it.RULES.all), + $currentBaseId = $it.baseId; + if ($thenPresent || $elsePresent) { + var $ifClause; + $it.createErrors = false; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($errs) + ' = errors; var ' + ($valid) + ' = true; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + $it.createErrors = true; + out += ' errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + if ($thenPresent) { + out += ' if (' + ($nextValid) + ') { '; + $it.schema = it.schema['then']; + $it.schemaPath = it.schemaPath + '.then'; + $it.errSchemaPath = it.errSchemaPath + '/then'; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($nextValid) + '; '; + if ($thenPresent && $elsePresent) { + $ifClause = 'ifClause' + $lvl; + out += ' var ' + ($ifClause) + ' = \'then\'; '; + } else { + $ifClause = '\'then\''; + } + out += ' } '; + if ($elsePresent) { + out += ' else { '; + } + } else { + out += ' if (!' 
+ ($nextValid) + ') { '; + } + if ($elsePresent) { + $it.schema = it.schema['else']; + $it.schemaPath = it.schemaPath + '.else'; + $it.errSchemaPath = it.errSchemaPath + '/else'; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($nextValid) + '; '; + if ($thenPresent && $elsePresent) { + $ifClause = 'ifClause' + $lvl; + out += ' var ' + ($ifClause) + ' = \'else\'; '; + } else { + $ifClause = '\'else\''; + } + out += ' } '; + } + out += ' if (!' + ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('if') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { failingKeyword: ' + ($ifClause) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match "\' + ' + ($ifClause) + ' + \'" schema\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + out = it.util.cleanUpCode(out); + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} + +},{}],26:[function(require,module,exports){ +'use strict'; + +//all requires must be explicit because browserify won't work with dynamic requires +module.exports = { + '$ref': require('./ref'), + allOf: require('./allOf'), + anyOf: require('./anyOf'), + '$comment': require('./comment'), + const: require('./const'), + contains: require('./contains'), + 
dependencies: require('./dependencies'), + 'enum': require('./enum'), + format: require('./format'), + 'if': require('./if'), + items: require('./items'), + maximum: require('./_limit'), + minimum: require('./_limit'), + maxItems: require('./_limitItems'), + minItems: require('./_limitItems'), + maxLength: require('./_limitLength'), + minLength: require('./_limitLength'), + maxProperties: require('./_limitProperties'), + minProperties: require('./_limitProperties'), + multipleOf: require('./multipleOf'), + not: require('./not'), + oneOf: require('./oneOf'), + pattern: require('./pattern'), + properties: require('./properties'), + propertyNames: require('./propertyNames'), + required: require('./required'), + uniqueItems: require('./uniqueItems'), + validate: require('./validate') +}; + +},{"./_limit":12,"./_limitItems":13,"./_limitLength":14,"./_limitProperties":15,"./allOf":16,"./anyOf":17,"./comment":18,"./const":19,"./contains":20,"./dependencies":22,"./enum":23,"./format":24,"./if":25,"./items":27,"./multipleOf":28,"./not":29,"./oneOf":30,"./pattern":31,"./properties":32,"./propertyNames":33,"./ref":34,"./required":35,"./uniqueItems":36,"./validate":37}],27:[function(require,module,exports){ +'use strict'; +module.exports = function generate_items(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $idx = 'i' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $currentBaseId = it.baseId; + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if 
(Array.isArray($schema)) { + var $additionalItems = it.schema.additionalItems; + if ($additionalItems === false) { + out += ' ' + ($valid) + ' = ' + ($data) + '.length <= ' + ($schema.length) + '; '; + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalItems'; + out += ' if (!' + ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('additionalItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schema.length) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have more than ' + ($schema.length) + ' items\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + $errSchemaPath = $currErrSchemaPath; + if ($breakOnError) { + $closingBraces += '}'; + out += ' else { '; + } + } + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + out += ' ' + ($nextValid) + ' = true; if (' + ($data) + '.length > ' + ($i) + ') { '; + var $passData = $data + '[' + $i + ']'; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + $it.errorPath = 
it.util.getPathExpr(it.errorPath, $i, it.opts.jsonPointers, true); + $it.dataPathArr[$dataNxt] = $i; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if (typeof $additionalItems == 'object' && it.util.schemaHasRules($additionalItems, it.RULES.all)) { + $it.schema = $additionalItems; + $it.schemaPath = it.schemaPath + '.additionalItems'; + $it.errSchemaPath = it.errSchemaPath + '/additionalItems'; + out += ' ' + ($nextValid) + ' = true; if (' + ($data) + '.length > ' + ($schema.length) + ') { for (var ' + ($idx) + ' = ' + ($schema.length) + '; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' 
+ ($nextValid) + ') break; '; + } + out += ' } } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } else if (it.util.schemaHasRules($schema, it.RULES.all)) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' for (var ' + ($idx) + ' = ' + (0) + '; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + out += ' }'; + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + out = it.util.cleanUpCode(out); + return out; +} + +},{}],28:[function(require,module,exports){ +'use strict'; +module.exports = function generate_multipleOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + out += 'var division' + ($lvl) + ';if ('; + if ($isData) { + out += ' ' + ($schemaValue) + ' !== undefined && ( typeof ' + ($schemaValue) + ' != \'number\' || 
'; + } + out += ' (division' + ($lvl) + ' = ' + ($data) + ' / ' + ($schemaValue) + ', '; + if (it.opts.multipleOfPrecision) { + out += ' Math.abs(Math.round(division' + ($lvl) + ') - division' + ($lvl) + ') > 1e-' + (it.opts.multipleOfPrecision) + ' '; + } else { + out += ' division' + ($lvl) + ' !== parseInt(division' + ($lvl) + ') '; + } + out += ' ) '; + if ($isData) { + out += ' ) '; + } + out += ' ) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('multipleOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { multipleOf: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be multiple of '; + if ($isData) { + out += '\' + ' + ($schemaValue); + } else { + out += '' + ($schemaValue) + '\''; + } + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],29:[function(require,module,exports){ +'use strict'; +module.exports = function generate_not(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = 
it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + if (it.util.schemaHasRules($schema, it.RULES.all)) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($errs) + ' = errors; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.createErrors = false; + var $allErrorsOption; + if ($it.opts.allErrors) { + $allErrorsOption = $it.opts.allErrors; + $it.opts.allErrors = false; + } + out += ' ' + (it.validate($it)) + ' '; + $it.createErrors = true; + if ($allErrorsOption) $it.opts.allErrors = $allErrorsOption; + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' if (' + ($nextValid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('not') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be valid\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' 
} else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + if (it.opts.allErrors) { + out += ' } '; + } + } else { + out += ' var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('not') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be valid\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if ($breakOnError) { + out += ' if (false) { '; + } + } + return out; +} + +},{}],30:[function(require,module,exports){ +'use strict'; +module.exports = function generate_oneOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $currentBaseId = $it.baseId, + $prevValid = 'prevValid' + $lvl, + $passingSchemas = 'passingSchemas' + $lvl; + out += 'var ' + ($errs) + ' = errors , ' + ($prevValid) + ' = false , ' + ($valid) + ' = false , ' + ($passingSchemas) + ' = null; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = 
arr1[$i += 1]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + } else { + out += ' var ' + ($nextValid) + ' = true; '; + } + if ($i) { + out += ' if (' + ($nextValid) + ' && ' + ($prevValid) + ') { ' + ($valid) + ' = false; ' + ($passingSchemas) + ' = [' + ($passingSchemas) + ', ' + ($i) + ']; } else { '; + $closingBraces += '}'; + } + out += ' if (' + ($nextValid) + ') { ' + ($valid) + ' = ' + ($prevValid) + ' = true; ' + ($passingSchemas) + ' = ' + ($i) + '; }'; + } + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += '' + ($closingBraces) + 'if (!' + ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('oneOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { passingSchemas: ' + ($passingSchemas) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match exactly one schema in oneOf\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += '} else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; }'; + if (it.opts.allErrors) { + out += ' } '; + } + return out; +} + +},{}],31:[function(require,module,exports){ +'use strict'; 
+module.exports = function generate_pattern(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $regexp = $isData ? '(new RegExp(' + $schemaValue + '))' : it.usePattern($schema); + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'string\') || '; + } + out += ' !' + ($regexp) + '.test(' + ($data) + ') ) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('pattern') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { pattern: '; + if ($isData) { + out += '' + ($schemaValue); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match pattern "'; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + (it.util.escapeQuotes($schema)); + } + out += '"\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + 
if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + +},{}],32:[function(require,module,exports){ +'use strict'; +module.exports = function generate_properties(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $key = 'key' + $lvl, + $idx = 'idx' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $dataProperties = 'dataProperties' + $lvl; + var $schemaKeys = Object.keys($schema || {}), + $pProperties = it.schema.patternProperties || {}, + $pPropertyKeys = Object.keys($pProperties), + $aProperties = it.schema.additionalProperties, + $someProperties = $schemaKeys.length || $pPropertyKeys.length, + $noAdditional = $aProperties === false, + $additionalIsSchema = typeof $aProperties == 'object' && Object.keys($aProperties).length, + $removeAdditional = it.opts.removeAdditional, + $checkAdditional = $noAdditional || $additionalIsSchema || $removeAdditional, + $ownProperties = it.opts.ownProperties, + $currentBaseId = it.baseId; + var $required = it.schema.required; + if ($required && !(it.opts.$data && $required.$data) && $required.length < it.opts.loopRequired) var $requiredHash = it.util.toHash($required); + out += 'var 
' + ($errs) + ' = errors;var ' + ($nextValid) + ' = true;'; + if ($ownProperties) { + out += ' var ' + ($dataProperties) + ' = undefined;'; + } + if ($checkAdditional) { + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + if ($someProperties) { + out += ' var isAdditional' + ($lvl) + ' = !(false '; + if ($schemaKeys.length) { + if ($schemaKeys.length > 8) { + out += ' || validate.schema' + ($schemaPath) + '.hasOwnProperty(' + ($key) + ') '; + } else { + var arr1 = $schemaKeys; + if (arr1) { + var $propertyKey, i1 = -1, + l1 = arr1.length - 1; + while (i1 < l1) { + $propertyKey = arr1[i1 += 1]; + out += ' || ' + ($key) + ' == ' + (it.util.toQuotedString($propertyKey)) + ' '; + } + } + } + } + if ($pPropertyKeys.length) { + var arr2 = $pPropertyKeys; + if (arr2) { + var $pProperty, $i = -1, + l2 = arr2.length - 1; + while ($i < l2) { + $pProperty = arr2[$i += 1]; + out += ' || ' + (it.usePattern($pProperty)) + '.test(' + ($key) + ') '; + } + } + } + out += ' ); if (isAdditional' + ($lvl) + ') { '; + } + if ($removeAdditional == 'all') { + out += ' delete ' + ($data) + '[' + ($key) + ']; '; + } else { + var $currentErrorPath = it.errorPath; + var $additionalProperty = '\' + ' + $key + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + } + if ($noAdditional) { + if ($removeAdditional) { + out += ' delete ' + ($data) + '[' + ($key) + ']; '; + } else { + out += ' ' + ($nextValid) + ' = false; '; + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalProperties'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul 
ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('additionalProperties') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { additionalProperty: \'' + ($additionalProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is an invalid additional property'; + } else { + out += 'should NOT have additional properties'; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + $errSchemaPath = $currErrSchemaPath; + if ($breakOnError) { + out += ' break; '; + } + } + } else if ($additionalIsSchema) { + if ($removeAdditional == 'failing') { + out += ' var ' + ($errs) + ' = errors; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty ? 
it.errorPath : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' if (!' + ($nextValid) + ') { errors = ' + ($errs) + '; if (validate.errors !== null) { if (errors) validate.errors.length = errors; else validate.errors = null; } delete ' + ($data) + '[' + ($key) + ']; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + } else { + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty ? it.errorPath : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' 
+ ($nextValid) + ') break; '; + } + } + } + it.errorPath = $currentErrorPath; + } + if ($someProperties) { + out += ' } '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + var $useDefaults = it.opts.useDefaults && !it.compositeRule; + if ($schemaKeys.length) { + var arr3 = $schemaKeys; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $sch = $schema[$propertyKey]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + var $prop = it.util.getProperty($propertyKey), + $passData = $data + $prop, + $hasDefault = $useDefaults && $sch.default !== undefined; + $it.schema = $sch; + $it.schemaPath = $schemaPath + $prop; + $it.errSchemaPath = $errSchemaPath + '/' + it.util.escapeFragment($propertyKey); + $it.errorPath = it.util.getPath(it.errorPath, $propertyKey, it.opts.jsonPointers); + $it.dataPathArr[$dataNxt] = it.util.toQuotedString($propertyKey); + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + $code = it.util.varReplace($code, $nextData, $passData); + var $useData = $passData; + } else { + var $useData = $nextData; + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; '; + } + if ($hasDefault) { + out += ' ' + ($code) + ' '; + } else { + if ($requiredHash && $requiredHash[$propertyKey]) { + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { ' + ($nextValid) + ' = false; '; + var $currentErrorPath = it.errorPath, + $currErrSchemaPath = $errSchemaPath, + $missingProperty = it.util.escapeQuotes($propertyKey); + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + $errSchemaPath = it.errSchemaPath + '/required'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + $errSchemaPath = $currErrSchemaPath; + it.errorPath = $currentErrorPath; + out += ' } else { '; + } else { + if ($breakOnError) { + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { ' + ($nextValid) + ' = true; } else { '; + } else { + out += ' if (' + ($useData) + ' !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ' ) { '; + } + } + out += ' ' + ($code) + ' } '; + } + } + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if ($pPropertyKeys.length) { + var arr4 = $pPropertyKeys; + if (arr4) { + var $pProperty, i4 = -1, + l4 = arr4.length - 1; + while (i4 < l4) { + $pProperty = arr4[i4 += 1]; + var $sch = $pProperties[$pProperty]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + $it.schema = $sch; + $it.schemaPath = it.schemaPath + '.patternProperties' + it.util.getProperty($pProperty); + $it.errSchemaPath = it.errSchemaPath + '/patternProperties/' + it.util.escapeFragment($pProperty); + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + out += ' if (' + (it.usePattern($pProperty)) + '.test(' + ($key) + ')) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' 
+ ($nextValid) + ') break; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else ' + ($nextValid) + ' = true; '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + out = it.util.cleanUpCode(out); + return out; +} + +},{}],33:[function(require,module,exports){ +'use strict'; +module.exports = function generate_propertyNames(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + out += 'var ' + ($errs) + ' = errors;'; + if (it.util.schemaHasRules($schema, it.RULES.all)) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + var $key = 'key' + $lvl, + $idx = 'idx' + $lvl, + $i = 'i' + $lvl, + $invalidName = '\' + ' + $key + ' + \'', + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $dataProperties = 'dataProperties' + $lvl, + $ownProperties = it.opts.ownProperties, + $currentBaseId = it.baseId; + if ($ownProperties) { + out += ' var ' + ($dataProperties) + ' = undefined; '; + } + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + out += ' var startErrs' + ($lvl) + ' = errors; '; + 
var $passData = $key; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' if (!' + ($nextValid) + ') { for (var ' + ($i) + '=startErrs' + ($lvl) + '; ' + ($i) + '= it.opts.loopRequired, + $ownProperties = it.opts.ownProperties; + if ($breakOnError) { + out += ' var missing' + ($lvl) + '; '; + if ($loopRequired) { + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + '; '; + } + var $i = 'i' + $lvl, + $propertyPath = 'schema' + $lvl + '[' + $i + ']', + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr($currentErrorPath, $propertyPath, it.opts.jsonPointers); + } + out += ' var ' + ($valid) + ' = true; '; + if ($isData) { + out += ' if (schema' + ($lvl) + ' === undefined) ' + ($valid) + ' = true; else if (!Array.isArray(schema' + ($lvl) + ')) ' + ($valid) + ' = false; else {'; + } + out += ' for (var ' + ($i) + ' = 0; ' + ($i) + ' < ' + ($vSchema) + '.length; ' + ($i) + '++) { ' + ($valid) + ' = ' + ($data) + '[' + ($vSchema) + '[' + ($i) + ']] !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + ']) '; + } + out += '; if (!' + ($valid) + ') break; } '; + if ($isData) { + out += ' } '; + } + out += ' if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + } else { + out += ' if ( '; + var arr2 = $required; + if (arr2) { + var $propertyKey, $i = -1, + l2 = arr2.length - 1; + while ($i < l2) { + $propertyKey = arr2[$i += 1]; + if ($i) { + out += ' || '; + } + var $prop = it.util.getProperty($propertyKey), + $useData = $data + $prop; + out += ' ( ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') && (missing' + ($lvl) + ' = ' + (it.util.toQuotedString(it.opts.jsonPointers ? 
$propertyKey : $prop)) + ') ) '; + } + } + out += ') { '; + var $propertyPath = 'missing' + $lvl, + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.opts.jsonPointers ? it.util.getPathExpr($currentErrorPath, $propertyPath, true) : $currentErrorPath + ' + ' + $propertyPath; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + } + } else { + if ($loopRequired) { + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + '; '; + } + var $i = 'i' + $lvl, + $propertyPath = 'schema' + $lvl + '[' + $i + ']', + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr($currentErrorPath, $propertyPath, 
it.opts.jsonPointers); + } + if ($isData) { + out += ' if (' + ($vSchema) + ' && !Array.isArray(' + ($vSchema) + ')) { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } else if (' + ($vSchema) + ' !== undefined) { '; + } + out += ' for (var ' + ($i) + ' = 0; ' + ($i) + ' < ' + ($vSchema) + '.length; ' + ($i) + '++) { if (' + ($data) + '[' + ($vSchema) + '[' + ($i) + ']] === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + ']) '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } } '; + if ($isData) { + out += ' } '; + } + } else { + var arr3 = $required; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $prop = it.util.getProperty($propertyKey), + $missingProperty = it.util.escapeQuotes($propertyKey), + $useData = $data + $prop; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } '; + } + } + } + } + it.errorPath = $currentErrorPath; + } else if ($breakOnError) { + out += ' if (true) {'; + } + return out; +} + +},{}],36:[function(require,module,exports){ +'use strict'; +module.exports = function generate_uniqueItems(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (($schema || $isData) && it.opts.uniqueItems !== false) { + if ($isData) { + out += ' var ' + ($valid) + '; if (' + 
($schemaValue) + ' === false || ' + ($schemaValue) + ' === undefined) ' + ($valid) + ' = true; else if (typeof ' + ($schemaValue) + ' != \'boolean\') ' + ($valid) + ' = false; else { '; + } + out += ' var i = ' + ($data) + '.length , ' + ($valid) + ' = true , j; if (i > 1) { '; + var $itemType = it.schema.items && it.schema.items.type, + $typeIsArray = Array.isArray($itemType); + if (!$itemType || $itemType == 'object' || $itemType == 'array' || ($typeIsArray && ($itemType.indexOf('object') >= 0 || $itemType.indexOf('array') >= 0))) { + out += ' outer: for (;i--;) { for (j = i; j--;) { if (equal(' + ($data) + '[i], ' + ($data) + '[j])) { ' + ($valid) + ' = false; break outer; } } } '; + } else { + out += ' var itemIndices = {}, item; for (;i--;) { var item = ' + ($data) + '[i]; '; + var $method = 'checkDataType' + ($typeIsArray ? 's' : ''); + out += ' if (' + (it.util[$method]($itemType, 'item', true)) + ') continue; '; + if ($typeIsArray) { + out += ' if (typeof item == \'string\') item = \'"\' + item; '; + } + out += ' if (typeof itemIndices[item] == \'number\') { ' + ($valid) + ' = false; j = itemIndices[item]; break; } itemIndices[item] = i; } '; + } + out += ' } '; + if ($isData) { + out += ' } '; + } + out += ' if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('uniqueItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { i: i, j: j } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have duplicate items (items ## \' + j + \' and \' + i + \' are identical)\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} + +},{}],37:[function(require,module,exports){ +'use strict'; +module.exports = function generate_validate(it, $keyword, $ruleType) { + var out = ''; + var $async = it.schema.$async === true, + $refKeywords = it.util.schemaHasRulesExcept(it.schema, it.RULES.all, '$ref'), + $id = it.self._getId(it.schema); + if (it.opts.strictKeywords) { + var $unknownKwd = it.util.schemaUnknownRules(it.schema, it.RULES.keywords); + if ($unknownKwd) { + var $keywordsMsg = 'unknown keyword: ' + $unknownKwd; + if (it.opts.strictKeywords === 'log') it.logger.warn($keywordsMsg); + else throw new Error($keywordsMsg); + } + } + if (it.isTop) { + out += ' var 
validate = '; + if ($async) { + it.async = true; + out += 'async '; + } + out += 'function(data, dataPath, parentData, parentDataProperty, rootData) { \'use strict\'; '; + if ($id && (it.opts.sourceCode || it.opts.processCode)) { + out += ' ' + ('/\*# sourceURL=' + $id + ' */') + ' '; + } + } + if (typeof it.schema == 'boolean' || !($refKeywords || it.schema.$ref)) { + var $keyword = 'false schema'; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + if (it.schema === false) { + if (it.isTop) { + $breakOnError = true; + } else { + out += ' var ' + ($valid) + ' = false; '; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'false schema') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'boolean schema is false\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + } else { + if (it.isTop) { + if ($async) { + out += ' return data; '; + } else { + out += ' 
validate.errors = null; return true; '; + } + } else { + out += ' var ' + ($valid) + ' = true; '; + } + } + if (it.isTop) { + out += ' }; return validate; '; + } + return out; + } + if (it.isTop) { + var $top = it.isTop, + $lvl = it.level = 0, + $dataLvl = it.dataLevel = 0, + $data = 'data'; + it.rootId = it.resolve.fullPath(it.self._getId(it.root.schema)); + it.baseId = it.baseId || it.rootId; + delete it.isTop; + it.dataPathArr = [undefined]; + if (it.schema.default !== undefined && it.opts.useDefaults && it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored in the schema root'; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + out += ' var vErrors = null; '; + out += ' var errors = 0; '; + out += ' if (rootData === undefined) rootData = data; '; + } else { + var $lvl = it.level, + $dataLvl = it.dataLevel, + $data = 'data' + ($dataLvl || ''); + if ($id) it.baseId = it.resolve.url(it.baseId, $id); + if ($async && !it.async) throw new Error('async schema in sync schema'); + out += ' var errs_' + ($lvl) + ' = errors;'; + } + var $valid = 'valid' + $lvl, + $breakOnError = !it.opts.allErrors, + $closingBraces1 = '', + $closingBraces2 = ''; + var $errorKeyword; + var $typeSchema = it.schema.type, + $typeIsArray = Array.isArray($typeSchema); + if ($typeSchema && it.opts.nullable && it.schema.nullable === true) { + if ($typeIsArray) { + if ($typeSchema.indexOf('null') == -1) $typeSchema = $typeSchema.concat('null'); + } else if ($typeSchema != 'null') { + $typeSchema = [$typeSchema, 'null']; + $typeIsArray = true; + } + } + if ($typeIsArray && $typeSchema.length == 1) { + $typeSchema = $typeSchema[0]; + $typeIsArray = false; + } + if (it.schema.$ref && $refKeywords) { + if (it.opts.extendRefs == 'fail') { + throw new Error('$ref: validation keywords used in schema at path "' + it.errSchemaPath + '" (see option extendRefs)'); + } else if (it.opts.extendRefs !== true) { + $refKeywords = false; + 
it.logger.warn('$ref: keywords ignored in schema at path "' + it.errSchemaPath + '"'); + } + } + if (it.schema.$comment && it.opts.$comment) { + out += ' ' + (it.RULES.all.$comment.code(it, '$comment')); + } + if ($typeSchema) { + if (it.opts.coerceTypes) { + var $coerceToTypes = it.util.coerceToTypes(it.opts.coerceTypes, $typeSchema); + } + var $rulesGroup = it.RULES.types[$typeSchema]; + if ($coerceToTypes || $typeIsArray || $rulesGroup === true || ($rulesGroup && !$shouldUseGroup($rulesGroup))) { + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type'; + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type', + $method = $typeIsArray ? 'checkDataTypes' : 'checkDataType'; + out += ' if (' + (it.util[$method]($typeSchema, $data, true)) + ') { '; + if ($coerceToTypes) { + var $dataType = 'dataType' + $lvl, + $coerced = 'coerced' + $lvl; + out += ' var ' + ($dataType) + ' = typeof ' + ($data) + '; '; + if (it.opts.coerceTypes == 'array') { + out += ' if (' + ($dataType) + ' == \'object\' && Array.isArray(' + ($data) + ')) ' + ($dataType) + ' = \'array\'; '; + } + out += ' var ' + ($coerced) + ' = undefined; '; + var $bracesCoercion = ''; + var arr1 = $coerceToTypes; + if (arr1) { + var $type, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $type = arr1[$i += 1]; + if ($i) { + out += ' if (' + ($coerced) + ' === undefined) { '; + $bracesCoercion += '}'; + } + if (it.opts.coerceTypes == 'array' && $type != 'array') { + out += ' if (' + ($dataType) + ' == \'array\' && ' + ($data) + '.length == 1) { ' + ($coerced) + ' = ' + ($data) + ' = ' + ($data) + '[0]; ' + ($dataType) + ' = typeof ' + ($data) + '; } '; + } + if ($type == 'string') { + out += ' if (' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\') ' + ($coerced) + ' = \'\' + ' + ($data) + '; else if (' + ($data) + ' === null) ' + ($coerced) + ' = \'\'; '; + } else if ($type == 'number' || $type == 'integer') { + out += 
' if (' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' === null || (' + ($dataType) + ' == \'string\' && ' + ($data) + ' && ' + ($data) + ' == +' + ($data) + ' '; + if ($type == 'integer') { + out += ' && !(' + ($data) + ' % 1)'; + } + out += ')) ' + ($coerced) + ' = +' + ($data) + '; '; + } else if ($type == 'boolean') { + out += ' if (' + ($data) + ' === \'false\' || ' + ($data) + ' === 0 || ' + ($data) + ' === null) ' + ($coerced) + ' = false; else if (' + ($data) + ' === \'true\' || ' + ($data) + ' === 1) ' + ($coerced) + ' = true; '; + } else if ($type == 'null') { + out += ' if (' + ($data) + ' === \'\' || ' + ($data) + ' === 0 || ' + ($data) + ' === false) ' + ($coerced) + ' = null; '; + } else if (it.opts.coerceTypes == 'array' && $type == 'array') { + out += ' if (' + ($dataType) + ' == \'string\' || ' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' == null) ' + ($coerced) + ' = [' + ($data) + ']; '; + } + } + } + out += ' ' + ($bracesCoercion) + ' if (' + ($coerced) + ' === undefined) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if 
(!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' ' + ($data) + ' = ' + ($coerced) + '; '; + if (!$dataLvl) { + out += 'if (' + ($parentData) + ' !== undefined)'; + } + out += ' ' + ($parentData) + '[' + ($parentDataProperty) + '] = ' + ($coerced) + '; } '; + } else { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = 
[err]; else vErrors.push(err); errors++; '; + } + } + out += ' } '; + } + } + if (it.schema.$ref && !$refKeywords) { + out += ' ' + (it.RULES.all.$ref.code(it, '$ref')) + ' '; + if ($breakOnError) { + out += ' } if (errors === '; + if ($top) { + out += '0'; + } else { + out += 'errs_' + ($lvl); + } + out += ') { '; + $closingBraces2 += '}'; + } + } else { + var arr2 = it.RULES; + if (arr2) { + var $rulesGroup, i2 = -1, + l2 = arr2.length - 1; + while (i2 < l2) { + $rulesGroup = arr2[i2 += 1]; + if ($shouldUseGroup($rulesGroup)) { + if ($rulesGroup.type) { + out += ' if (' + (it.util.checkDataType($rulesGroup.type, $data)) + ') { '; + } + if (it.opts.useDefaults) { + if ($rulesGroup.type == 'object' && it.schema.properties) { + var $schema = it.schema.properties, + $schemaKeys = Object.keys($schema); + var arr3 = $schemaKeys; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $sch = $schema[$propertyKey]; + if ($sch.default !== undefined) { + var $passData = $data + it.util.getProperty($propertyKey); + if (it.compositeRule) { + if (it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored for: ' + $passData; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + } else { + out += ' if (' + ($passData) + ' === undefined '; + if (it.opts.useDefaults == 'empty') { + out += ' || ' + ($passData) + ' === null || ' + ($passData) + ' === \'\' '; + } + out += ' ) ' + ($passData) + ' = '; + if (it.opts.useDefaults == 'shared') { + out += ' ' + (it.useDefault($sch.default)) + ' '; + } else { + out += ' ' + (JSON.stringify($sch.default)) + ' '; + } + out += '; '; + } + } + } + } + } else if ($rulesGroup.type == 'array' && Array.isArray(it.schema.items)) { + var arr4 = it.schema.items; + if (arr4) { + var $sch, $i = -1, + l4 = arr4.length - 1; + while ($i < l4) { + $sch = arr4[$i += 1]; + if ($sch.default !== undefined) { + var $passData = 
$data + '[' + $i + ']'; + if (it.compositeRule) { + if (it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored for: ' + $passData; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + } else { + out += ' if (' + ($passData) + ' === undefined '; + if (it.opts.useDefaults == 'empty') { + out += ' || ' + ($passData) + ' === null || ' + ($passData) + ' === \'\' '; + } + out += ' ) ' + ($passData) + ' = '; + if (it.opts.useDefaults == 'shared') { + out += ' ' + (it.useDefault($sch.default)) + ' '; + } else { + out += ' ' + (JSON.stringify($sch.default)) + ' '; + } + out += '; '; + } + } + } + } + } + } + var arr5 = $rulesGroup.rules; + if (arr5) { + var $rule, i5 = -1, + l5 = arr5.length - 1; + while (i5 < l5) { + $rule = arr5[i5 += 1]; + if ($shouldUseRule($rule)) { + var $code = $rule.code(it, $rule.keyword, $rulesGroup.type); + if ($code) { + out += ' ' + ($code) + ' '; + if ($breakOnError) { + $closingBraces1 += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces1) + ' '; + $closingBraces1 = ''; + } + if ($rulesGroup.type) { + out += ' } '; + if ($typeSchema && $typeSchema === $rulesGroup.type && !$coerceToTypes) { + out += ' else { '; + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } 
+ if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + } + } + if ($breakOnError) { + out += ' if (errors === '; + if ($top) { + out += '0'; + } else { + out += 'errs_' + ($lvl); + } + out += ') { '; + $closingBraces2 += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces2) + ' '; + } + if ($top) { + if ($async) { + out += ' if (errors === 0) return data; '; + out += ' else throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; '; + out += ' return errors === 0; '; + } + out += ' }; return validate;'; + } else { + out += ' var ' + ($valid) + ' = errors === errs_' + ($lvl) + ';'; + } + out = it.util.cleanUpCode(out); + if ($top) { + out = it.util.finalCleanUpCode(out, $async); + } + + function $shouldUseGroup($rulesGroup) { + var rules = $rulesGroup.rules; + for (var i = 0; i < rules.length; i++) + if ($shouldUseRule(rules[i])) return true; + } + + function $shouldUseRule($rule) { + return it.schema[$rule.keyword] !== undefined || ($rule.implements && $ruleImplementsSomeKeyword($rule)); + } + + function $ruleImplementsSomeKeyword($rule) { + var impl = $rule.implements; + for (var i = 0; i < impl.length; i++) + if (it.schema[impl[i]] !== undefined) return true; + } + return out; +} + +},{}],38:[function(require,module,exports){ +'use strict'; + +var IDENTIFIER = /^[a-z_$][a-z0-9_$-]*$/i; +var customRuleCode = require('./dotjs/custom'); 
+var metaSchema = require('./refs/json-schema-draft-07.json'); + +module.exports = { + add: addKeyword, + get: getKeyword, + remove: removeKeyword, + validate: validateKeyword +}; + +var definitionSchema = { + definitions: { + simpleTypes: metaSchema.definitions.simpleTypes + }, + type: 'object', + dependencies: { + schema: ['validate'], + $data: ['validate'], + statements: ['inline'], + valid: {not: {required: ['macro']}} + }, + properties: { + type: metaSchema.properties.type, + schema: {type: 'boolean'}, + statements: {type: 'boolean'}, + dependencies: { + type: 'array', + items: {type: 'string'} + }, + metaSchema: {type: 'object'}, + modifying: {type: 'boolean'}, + valid: {type: 'boolean'}, + $data: {type: 'boolean'}, + async: {type: 'boolean'}, + errors: { + anyOf: [ + {type: 'boolean'}, + {const: 'full'} + ] + } + } +}; + +/** + * Define custom keyword + * @this Ajv + * @param {String} keyword custom keyword, should be unique (including different from all standard, custom and macro keywords). + * @param {Object} definition keyword definition object with properties `type` (type(s) which the keyword applies to), `validate` or `compile`. 
+ * @return {Ajv} this for method chaining + */ +function addKeyword(keyword, definition) { + /* jshint validthis: true */ + /* eslint no-shadow: 0 */ + var RULES = this.RULES; + if (RULES.keywords[keyword]) + throw new Error('Keyword ' + keyword + ' is already defined'); + + if (!IDENTIFIER.test(keyword)) + throw new Error('Keyword ' + keyword + ' is not a valid identifier'); + + if (definition) { + this.validateKeyword(definition, true); + + var dataType = definition.type; + if (Array.isArray(dataType)) { + for (var i=0; i 1) { + sets[0] = sets[0].slice(0, -1); + var xl = sets.length - 1; + for (var x = 1; x < xl; ++x) { + sets[x] = sets[x].slice(1, -1); + } + sets[xl] = sets[xl].slice(1); + return sets.join(''); + } else { + return sets[0]; + } +} +function subexp(str) { + return "(?:" + str + ")"; +} +function typeOf(o) { + return o === undefined ? "undefined" : o === null ? "null" : Object.prototype.toString.call(o).split(" ").pop().split("]").shift().toLowerCase(); +} +function toUpperCase(str) { + return str.toUpperCase(); +} +function toArray(obj) { + return obj !== undefined && obj !== null ? obj instanceof Array ? obj : typeof obj.length !== "number" || obj.split || obj.setInterval || obj.call ? 
[obj] : Array.prototype.slice.call(obj) : []; +} +function assign(target, source) { + var obj = target; + if (source) { + for (var key in source) { + obj[key] = source[key]; + } + } + return obj; +} + +function buildExps(isIRI) { + var ALPHA$$ = "[A-Za-z]", + CR$ = "[\\x0D]", + DIGIT$$ = "[0-9]", + DQUOTE$$ = "[\\x22]", + HEXDIG$$ = merge(DIGIT$$, "[A-Fa-f]"), + //case-insensitive + LF$$ = "[\\x0A]", + SP$$ = "[\\x20]", + PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)), + //expanded + GEN_DELIMS$$ = "[\\:\\/\\?\\#\\[\\]\\@]", + SUB_DELIMS$$ = "[\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=]", + RESERVED$$ = merge(GEN_DELIMS$$, SUB_DELIMS$$), + UCSCHAR$$ = isIRI ? "[\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]" : "[]", + //subset, excludes bidi control characters + IPRIVATE$$ = isIRI ? "[\\uE000-\\uF8FF]" : "[]", + //subset + UNRESERVED$$ = merge(ALPHA$$, DIGIT$$, "[\\-\\.\\_\\~]", UCSCHAR$$), + SCHEME$ = subexp(ALPHA$$ + merge(ALPHA$$, DIGIT$$, "[\\+\\-\\.]") + "*"), + USERINFO$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]")) + "*"), + DEC_OCTET$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("[1-9]" + DIGIT$$) + "|" + DIGIT$$), + DEC_OCTET_RELAXED$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("0?[1-9]" + DIGIT$$) + "|0?0?" + DIGIT$$), + //relaxed parsing rules + IPV4ADDRESS$ = subexp(DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." 
+ DEC_OCTET_RELAXED$), + H16$ = subexp(HEXDIG$$ + "{1,4}"), + LS32$ = subexp(subexp(H16$ + "\\:" + H16$) + "|" + IPV4ADDRESS$), + IPV6ADDRESS1$ = subexp(subexp(H16$ + "\\:") + "{6}" + LS32$), + // 6( h16 ":" ) ls32 + IPV6ADDRESS2$ = subexp("\\:\\:" + subexp(H16$ + "\\:") + "{5}" + LS32$), + // "::" 5( h16 ":" ) ls32 + IPV6ADDRESS3$ = subexp(subexp(H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{4}" + LS32$), + //[ h16 ] "::" 4( h16 ":" ) ls32 + IPV6ADDRESS4$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,1}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{3}" + LS32$), + //[ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + IPV6ADDRESS5$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,2}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{2}" + LS32$), + //[ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + IPV6ADDRESS6$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,3}" + H16$) + "?\\:\\:" + H16$ + "\\:" + LS32$), + //[ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + IPV6ADDRESS7$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,4}" + H16$) + "?\\:\\:" + LS32$), + //[ *4( h16 ":" ) h16 ] "::" ls32 + IPV6ADDRESS8$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,5}" + H16$) + "?\\:\\:" + H16$), + //[ *5( h16 ":" ) h16 ] "::" h16 + IPV6ADDRESS9$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,6}" + H16$) + "?\\:\\:"), + //[ *6( h16 ":" ) h16 ] "::" + IPV6ADDRESS$ = subexp([IPV6ADDRESS1$, IPV6ADDRESS2$, IPV6ADDRESS3$, IPV6ADDRESS4$, IPV6ADDRESS5$, IPV6ADDRESS6$, IPV6ADDRESS7$, IPV6ADDRESS8$, IPV6ADDRESS9$].join("|")), + ZONEID$ = subexp(subexp(UNRESERVED$$ + "|" + PCT_ENCODED$) + "+"), + //RFC 6874 + IPV6ADDRZ$ = subexp(IPV6ADDRESS$ + "\\%25" + ZONEID$), + //RFC 6874 + IPV6ADDRZ_RELAXED$ = subexp(IPV6ADDRESS$ + subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + ZONEID$), + //RFC 6874, with relaxed parsing rules + IPVFUTURE$ = subexp("[vV]" + HEXDIG$$ + "+\\." 
+ merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]") + "+"), + IP_LITERAL$ = subexp("\\[" + subexp(IPV6ADDRZ_RELAXED$ + "|" + IPV6ADDRESS$ + "|" + IPVFUTURE$) + "\\]"), + //RFC 6874 + REG_NAME$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$)) + "*"), + HOST$ = subexp(IP_LITERAL$ + "|" + IPV4ADDRESS$ + "(?!" + REG_NAME$ + ")" + "|" + REG_NAME$), + PORT$ = subexp(DIGIT$$ + "*"), + AUTHORITY$ = subexp(subexp(USERINFO$ + "@") + "?" + HOST$ + subexp("\\:" + PORT$) + "?"), + PCHAR$ = subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@]")), + SEGMENT$ = subexp(PCHAR$ + "*"), + SEGMENT_NZ$ = subexp(PCHAR$ + "+"), + SEGMENT_NZ_NC$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\@]")) + "+"), + PATH_ABEMPTY$ = subexp(subexp("\\/" + SEGMENT$) + "*"), + PATH_ABSOLUTE$ = subexp("\\/" + subexp(SEGMENT_NZ$ + PATH_ABEMPTY$) + "?"), + //simplified + PATH_NOSCHEME$ = subexp(SEGMENT_NZ_NC$ + PATH_ABEMPTY$), + //simplified + PATH_ROOTLESS$ = subexp(SEGMENT_NZ$ + PATH_ABEMPTY$), + //simplified + PATH_EMPTY$ = "(?!" + PCHAR$ + ")", + PATH$ = subexp(PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), + QUERY$ = subexp(subexp(PCHAR$ + "|" + merge("[\\/\\?]", IPRIVATE$$)) + "*"), + FRAGMENT$ = subexp(subexp(PCHAR$ + "|[\\/\\?]") + "*"), + HIER_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), + URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), + RELATIVE_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$), + RELATIVE$ = subexp(RELATIVE_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), + URI_REFERENCE$ = subexp(URI$ + "|" + RELATIVE$), + ABSOLUTE_URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" 
+ QUERY$) + "?"), + GENERIC_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", + RELATIVE_REF$ = "^(){0}" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", + ABSOLUTE_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?$", + SAMEDOC_REF$ = "^" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", + AUTHORITY_REF$ = "^" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?$"; + return { + NOT_SCHEME: new RegExp(merge("[^]", ALPHA$$, DIGIT$$, "[\\+\\-\\.]"), "g"), + NOT_USERINFO: new RegExp(merge("[^\\%\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), + NOT_HOST: new RegExp(merge("[^\\%\\[\\]\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), + NOT_PATH: new RegExp(merge("[^\\%\\/\\:\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), + NOT_PATH_NOSCHEME: new RegExp(merge("[^\\%\\/\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), + NOT_QUERY: new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]", IPRIVATE$$), "g"), + NOT_FRAGMENT: new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]"), "g"), + ESCAPE: new RegExp(merge("[^]", UNRESERVED$$, SUB_DELIMS$$), "g"), + UNRESERVED: new RegExp(UNRESERVED$$, "g"), + OTHER_CHARS: new RegExp(merge("[^\\%]", UNRESERVED$$, RESERVED$$), "g"), + PCT_ENCODED: new 
RegExp(PCT_ENCODED$, "g"), + IPV4ADDRESS: new RegExp("^(" + IPV4ADDRESS$ + ")$"), + IPV6ADDRESS: new RegExp("^\\[?(" + IPV6ADDRESS$ + ")" + subexp(subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + "(" + ZONEID$ + ")") + "?\\]?$") //RFC 6874, with relaxed parsing rules + }; +} +var URI_PROTOCOL = buildExps(false); + +var IRI_PROTOCOL = buildExps(true); + +var slicedToArray = function () { + function sliceIterator(arr, i) { + var _arr = []; + var _n = true; + var _d = false; + var _e = undefined; + + try { + for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { + _arr.push(_s.value); + + if (i && _arr.length === i) break; + } + } catch (err) { + _d = true; + _e = err; + } finally { + try { + if (!_n && _i["return"]) _i["return"](); + } finally { + if (_d) throw _e; + } + } + + return _arr; + } + + return function (arr, i) { + if (Array.isArray(arr)) { + return arr; + } else if (Symbol.iterator in Object(arr)) { + return sliceIterator(arr, i); + } else { + throw new TypeError("Invalid attempt to destructure non-iterable instance"); + } + }; +}(); + + + + + + + + + + + + + +var toConsumableArray = function (arr) { + if (Array.isArray(arr)) { + for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) arr2[i] = arr[i]; + + return arr2; + } else { + return Array.from(arr); + } +}; + +/** Highest positive signed 32-bit float value */ + +var maxInt = 2147483647; // aka. 
0x7FFFFFFF or 2^31-1 + +/** Bootstring parameters */ +var base = 36; +var tMin = 1; +var tMax = 26; +var skew = 38; +var damp = 700; +var initialBias = 72; +var initialN = 128; // 0x80 +var delimiter = '-'; // '\x2D' + +/** Regular expressions */ +var regexPunycode = /^xn--/; +var regexNonASCII = /[^\0-\x7E]/; // non-ASCII chars +var regexSeparators = /[\x2E\u3002\uFF0E\uFF61]/g; // RFC 3490 separators + +/** Error messages */ +var errors = { + 'overflow': 'Overflow: input needs wider integers to process', + 'not-basic': 'Illegal input >= 0x80 (not a basic code point)', + 'invalid-input': 'Invalid input' +}; + +/** Convenience shortcuts */ +var baseMinusTMin = base - tMin; +var floor = Math.floor; +var stringFromCharCode = String.fromCharCode; + +/*--------------------------------------------------------------------------*/ + +/** + * A generic error utility function. + * @private + * @param {String} type The error type. + * @returns {Error} Throws a `RangeError` with the applicable error message. + */ +function error$1(type) { + throw new RangeError(errors[type]); +} + +/** + * A generic `Array#map` utility function. + * @private + * @param {Array} array The array to iterate over. + * @param {Function} callback The function that gets called for every array + * item. + * @returns {Array} A new array of values returned by the callback function. + */ +function map(array, fn) { + var result = []; + var length = array.length; + while (length--) { + result[length] = fn(array[length]); + } + return result; +} + +/** + * A simple `Array#map`-like wrapper to work with domain name strings or email + * addresses. + * @private + * @param {String} domain The domain name or email address. + * @param {Function} callback The function that gets called for every + * character. + * @returns {Array} A new string of characters returned by the callback + * function. 
+ */ +function mapDomain(string, fn) { + var parts = string.split('@'); + var result = ''; + if (parts.length > 1) { + // In email addresses, only the domain name should be punycoded. Leave + // the local part (i.e. everything up to `@`) intact. + result = parts[0] + '@'; + string = parts[1]; + } + // Avoid `split(regex)` for IE8 compatibility. See #17. + string = string.replace(regexSeparators, '\x2E'); + var labels = string.split('.'); + var encoded = map(labels, fn).join('.'); + return result + encoded; +} + +/** + * Creates an array containing the numeric code points of each Unicode + * character in the string. While JavaScript uses UCS-2 internally, + * this function will convert a pair of surrogate halves (each of which + * UCS-2 exposes as separate characters) into a single code point, + * matching UTF-16. + * @see `punycode.ucs2.encode` + * @see + * @memberOf punycode.ucs2 + * @name decode + * @param {String} string The Unicode input string (UCS-2). + * @returns {Array} The new array of code points. + */ +function ucs2decode(string) { + var output = []; + var counter = 0; + var length = string.length; + while (counter < length) { + var value = string.charCodeAt(counter++); + if (value >= 0xD800 && value <= 0xDBFF && counter < length) { + // It's a high surrogate, and there is a next character. + var extra = string.charCodeAt(counter++); + if ((extra & 0xFC00) == 0xDC00) { + // Low surrogate. + output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000); + } else { + // It's an unmatched surrogate; only append this code unit, in case the + // next code unit is the high surrogate of a surrogate pair. + output.push(value); + counter--; + } + } else { + output.push(value); + } + } + return output; +} + +/** + * Creates a string based on an array of numeric code points. + * @see `punycode.ucs2.decode` + * @memberOf punycode.ucs2 + * @name encode + * @param {Array} codePoints The array of numeric code points. 
+ * @returns {String} The new Unicode string (UCS-2). + */ +var ucs2encode = function ucs2encode(array) { + return String.fromCodePoint.apply(String, toConsumableArray(array)); +}; + +/** + * Converts a basic code point into a digit/integer. + * @see `digitToBasic()` + * @private + * @param {Number} codePoint The basic numeric code point value. + * @returns {Number} The numeric value of a basic code point (for use in + * representing integers) in the range `0` to `base - 1`, or `base` if + * the code point does not represent a value. + */ +var basicToDigit = function basicToDigit(codePoint) { + if (codePoint - 0x30 < 0x0A) { + return codePoint - 0x16; + } + if (codePoint - 0x41 < 0x1A) { + return codePoint - 0x41; + } + if (codePoint - 0x61 < 0x1A) { + return codePoint - 0x61; + } + return base; +}; + +/** + * Converts a digit/integer into a basic code point. + * @see `basicToDigit()` + * @private + * @param {Number} digit The numeric value of a basic code point. + * @returns {Number} The basic code point whose value (when used for + * representing integers) is `digit`, which needs to be in the range + * `0` to `base - 1`. If `flag` is non-zero, the uppercase form is + * used; else, the lowercase form is used. The behavior is undefined + * if `flag` is non-zero and `digit` has no uppercase form. + */ +var digitToBasic = function digitToBasic(digit, flag) { + // 0..25 map to ASCII a..z or A..Z + // 26..35 map to ASCII 0..9 + return digit + 22 + 75 * (digit < 26) - ((flag != 0) << 5); +}; + +/** + * Bias adaptation function as per section 3.4 of RFC 3492. + * https://tools.ietf.org/html/rfc3492#section-3.4 + * @private + */ +var adapt = function adapt(delta, numPoints, firstTime) { + var k = 0; + delta = firstTime ? 
floor(delta / damp) : delta >> 1; + delta += floor(delta / numPoints); + for (; /* no initialization */delta > baseMinusTMin * tMax >> 1; k += base) { + delta = floor(delta / baseMinusTMin); + } + return floor(k + (baseMinusTMin + 1) * delta / (delta + skew)); +}; + +/** + * Converts a Punycode string of ASCII-only symbols to a string of Unicode + * symbols. + * @memberOf punycode + * @param {String} input The Punycode string of ASCII-only symbols. + * @returns {String} The resulting string of Unicode symbols. + */ +var decode = function decode(input) { + // Don't use UCS-2. + var output = []; + var inputLength = input.length; + var i = 0; + var n = initialN; + var bias = initialBias; + + // Handle the basic code points: let `basic` be the number of input code + // points before the last delimiter, or `0` if there is none, then copy + // the first basic code points to the output. + + var basic = input.lastIndexOf(delimiter); + if (basic < 0) { + basic = 0; + } + + for (var j = 0; j < basic; ++j) { + // if it's not a basic code point + if (input.charCodeAt(j) >= 0x80) { + error$1('not-basic'); + } + output.push(input.charCodeAt(j)); + } + + // Main decoding loop: start just after the last delimiter if any basic code + // points were copied; start at the beginning otherwise. + + for (var index = basic > 0 ? basic + 1 : 0; index < inputLength;) /* no final expression */{ + + // `index` is the index of the next character to be consumed. + // Decode a generalized variable-length integer into `delta`, + // which gets added to `i`. The overflow checking is easier + // if we increase `i` as we go, then subtract off its starting + // value at the end to obtain `delta`. 
+ var oldi = i; + for (var w = 1, k = base;; /* no condition */k += base) { + + if (index >= inputLength) { + error$1('invalid-input'); + } + + var digit = basicToDigit(input.charCodeAt(index++)); + + if (digit >= base || digit > floor((maxInt - i) / w)) { + error$1('overflow'); + } + + i += digit * w; + var t = k <= bias ? tMin : k >= bias + tMax ? tMax : k - bias; + + if (digit < t) { + break; + } + + var baseMinusT = base - t; + if (w > floor(maxInt / baseMinusT)) { + error$1('overflow'); + } + + w *= baseMinusT; + } + + var out = output.length + 1; + bias = adapt(i - oldi, out, oldi == 0); + + // `i` was supposed to wrap around from `out` to `0`, + // incrementing `n` each time, so we'll fix that now: + if (floor(i / out) > maxInt - n) { + error$1('overflow'); + } + + n += floor(i / out); + i %= out; + + // Insert `n` at position `i` of the output. + output.splice(i++, 0, n); + } + + return String.fromCodePoint.apply(String, output); +}; + +/** + * Converts a string of Unicode symbols (e.g. a domain name label) to a + * Punycode string of ASCII-only symbols. + * @memberOf punycode + * @param {String} input The string of Unicode symbols. + * @returns {String} The resulting Punycode string of ASCII-only symbols. + */ +var encode = function encode(input) { + var output = []; + + // Convert the input in UCS-2 to an array of Unicode code points. + input = ucs2decode(input); + + // Cache the length. + var inputLength = input.length; + + // Initialize the state. + var n = initialN; + var delta = 0; + var bias = initialBias; + + // Handle the basic code points. 
+ var _iteratorNormalCompletion = true; + var _didIteratorError = false; + var _iteratorError = undefined; + + try { + for (var _iterator = input[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { + var _currentValue2 = _step.value; + + if (_currentValue2 < 0x80) { + output.push(stringFromCharCode(_currentValue2)); + } + } + } catch (err) { + _didIteratorError = true; + _iteratorError = err; + } finally { + try { + if (!_iteratorNormalCompletion && _iterator.return) { + _iterator.return(); + } + } finally { + if (_didIteratorError) { + throw _iteratorError; + } + } + } + + var basicLength = output.length; + var handledCPCount = basicLength; + + // `handledCPCount` is the number of code points that have been handled; + // `basicLength` is the number of basic code points. + + // Finish the basic string with a delimiter unless it's empty. + if (basicLength) { + output.push(delimiter); + } + + // Main encoding loop: + while (handledCPCount < inputLength) { + + // All non-basic code points < n have been handled already. Find the next + // larger one: + var m = maxInt; + var _iteratorNormalCompletion2 = true; + var _didIteratorError2 = false; + var _iteratorError2 = undefined; + + try { + for (var _iterator2 = input[Symbol.iterator](), _step2; !(_iteratorNormalCompletion2 = (_step2 = _iterator2.next()).done); _iteratorNormalCompletion2 = true) { + var currentValue = _step2.value; + + if (currentValue >= n && currentValue < m) { + m = currentValue; + } + } + + // Increase `delta` enough to advance the decoder's state to , + // but guard against overflow. 
+ } catch (err) { + _didIteratorError2 = true; + _iteratorError2 = err; + } finally { + try { + if (!_iteratorNormalCompletion2 && _iterator2.return) { + _iterator2.return(); + } + } finally { + if (_didIteratorError2) { + throw _iteratorError2; + } + } + } + + var handledCPCountPlusOne = handledCPCount + 1; + if (m - n > floor((maxInt - delta) / handledCPCountPlusOne)) { + error$1('overflow'); + } + + delta += (m - n) * handledCPCountPlusOne; + n = m; + + var _iteratorNormalCompletion3 = true; + var _didIteratorError3 = false; + var _iteratorError3 = undefined; + + try { + for (var _iterator3 = input[Symbol.iterator](), _step3; !(_iteratorNormalCompletion3 = (_step3 = _iterator3.next()).done); _iteratorNormalCompletion3 = true) { + var _currentValue = _step3.value; + + if (_currentValue < n && ++delta > maxInt) { + error$1('overflow'); + } + if (_currentValue == n) { + // Represent delta as a generalized variable-length integer. + var q = delta; + for (var k = base;; /* no condition */k += base) { + var t = k <= bias ? tMin : k >= bias + tMax ? tMax : k - bias; + if (q < t) { + break; + } + var qMinusT = q - t; + var baseMinusT = base - t; + output.push(stringFromCharCode(digitToBasic(t + qMinusT % baseMinusT, 0))); + q = floor(qMinusT / baseMinusT); + } + + output.push(stringFromCharCode(digitToBasic(q, 0))); + bias = adapt(delta, handledCPCountPlusOne, handledCPCount == basicLength); + delta = 0; + ++handledCPCount; + } + } + } catch (err) { + _didIteratorError3 = true; + _iteratorError3 = err; + } finally { + try { + if (!_iteratorNormalCompletion3 && _iterator3.return) { + _iterator3.return(); + } + } finally { + if (_didIteratorError3) { + throw _iteratorError3; + } + } + } + + ++delta; + ++n; + } + return output.join(''); +}; + +/** + * Converts a Punycode string representing a domain name or an email address + * to Unicode. Only the Punycoded parts of the input will be converted, i.e. 
+ * it doesn't matter if you call it on a string that has already been + * converted to Unicode. + * @memberOf punycode + * @param {String} input The Punycoded domain name or email address to + * convert to Unicode. + * @returns {String} The Unicode representation of the given Punycode + * string. + */ +var toUnicode = function toUnicode(input) { + return mapDomain(input, function (string) { + return regexPunycode.test(string) ? decode(string.slice(4).toLowerCase()) : string; + }); +}; + +/** + * Converts a Unicode string representing a domain name or an email address to + * Punycode. Only the non-ASCII parts of the domain name will be converted, + * i.e. it doesn't matter if you call it with a domain that's already in + * ASCII. + * @memberOf punycode + * @param {String} input The domain name or email address to convert, as a + * Unicode string. + * @returns {String} The Punycode representation of the given domain name or + * email address. + */ +var toASCII = function toASCII(input) { + return mapDomain(input, function (string) { + return regexNonASCII.test(string) ? 'xn--' + encode(string) : string; + }); +}; + +/*--------------------------------------------------------------------------*/ + +/** Define the public API */ +var punycode = { + /** + * A string representing the current Punycode.js version number. + * @memberOf punycode + * @type String + */ + 'version': '2.1.0', + /** + * An object of methods to convert from JavaScript's internal character + * representation (UCS-2) to Unicode code points, and back. + * @see + * @memberOf punycode + * @type Object + */ + 'ucs2': { + 'decode': ucs2decode, + 'encode': ucs2encode + }, + 'decode': decode, + 'encode': encode, + 'toASCII': toASCII, + 'toUnicode': toUnicode +}; + +/** + * URI.js + * + * @fileoverview An RFC 3986 compliant, scheme extendable URI parsing/validating/resolving library for JavaScript. + * @author Gary Court + * @see http://github.com/garycourt/uri-js + */ +/** + * Copyright 2011 Gary Court. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY GARY COURT ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GARY COURT OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are those of the + * authors and should not be interpreted as representing official policies, either expressed + * or implied, of Gary Court. 
+ */ +var SCHEMES = {}; +function pctEncChar(chr) { + var c = chr.charCodeAt(0); + var e = void 0; + if (c < 16) e = "%0" + c.toString(16).toUpperCase();else if (c < 128) e = "%" + c.toString(16).toUpperCase();else if (c < 2048) e = "%" + (c >> 6 | 192).toString(16).toUpperCase() + "%" + (c & 63 | 128).toString(16).toUpperCase();else e = "%" + (c >> 12 | 224).toString(16).toUpperCase() + "%" + (c >> 6 & 63 | 128).toString(16).toUpperCase() + "%" + (c & 63 | 128).toString(16).toUpperCase(); + return e; +} +function pctDecChars(str) { + var newStr = ""; + var i = 0; + var il = str.length; + while (i < il) { + var c = parseInt(str.substr(i + 1, 2), 16); + if (c < 128) { + newStr += String.fromCharCode(c); + i += 3; + } else if (c >= 194 && c < 224) { + if (il - i >= 6) { + var c2 = parseInt(str.substr(i + 4, 2), 16); + newStr += String.fromCharCode((c & 31) << 6 | c2 & 63); + } else { + newStr += str.substr(i, 6); + } + i += 6; + } else if (c >= 224) { + if (il - i >= 9) { + var _c = parseInt(str.substr(i + 4, 2), 16); + var c3 = parseInt(str.substr(i + 7, 2), 16); + newStr += String.fromCharCode((c & 15) << 12 | (_c & 63) << 6 | c3 & 63); + } else { + newStr += str.substr(i, 9); + } + i += 9; + } else { + newStr += str.substr(i, 3); + i += 3; + } + } + return newStr; +} +function _normalizeComponentEncoding(components, protocol) { + function decodeUnreserved(str) { + var decStr = pctDecChars(str); + return !decStr.match(protocol.UNRESERVED) ? 
str : decStr; + } + if (components.scheme) components.scheme = String(components.scheme).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_SCHEME, ""); + if (components.userinfo !== undefined) components.userinfo = String(components.userinfo).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_USERINFO, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); + if (components.host !== undefined) components.host = String(components.host).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_HOST, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); + if (components.path !== undefined) components.path = String(components.path).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(components.scheme ? protocol.NOT_PATH : protocol.NOT_PATH_NOSCHEME, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); + if (components.query !== undefined) components.query = String(components.query).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_QUERY, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); + if (components.fragment !== undefined) components.fragment = String(components.fragment).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_FRAGMENT, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); + return components; +} + +function _stripLeadingZeros(str) { + return str.replace(/^0*(.*)/, "$1") || "0"; +} +function _normalizeIPv4(host, protocol) { + var matches = host.match(protocol.IPV4ADDRESS) || []; + + var _matches = slicedToArray(matches, 2), + address = _matches[1]; + + if (address) { + return address.split(".").map(_stripLeadingZeros).join("."); + } else { + return host; + } +} +function _normalizeIPv6(host, protocol) { + var matches = host.match(protocol.IPV6ADDRESS) || []; + + var _matches2 = slicedToArray(matches, 3), + address = _matches2[1], + zone = _matches2[2]; + + if (address) { + var _address$toLowerCase$ = 
address.toLowerCase().split('::').reverse(), + _address$toLowerCase$2 = slicedToArray(_address$toLowerCase$, 2), + last = _address$toLowerCase$2[0], + first = _address$toLowerCase$2[1]; + + var firstFields = first ? first.split(":").map(_stripLeadingZeros) : []; + var lastFields = last.split(":").map(_stripLeadingZeros); + var isLastFieldIPv4Address = protocol.IPV4ADDRESS.test(lastFields[lastFields.length - 1]); + var fieldCount = isLastFieldIPv4Address ? 7 : 8; + var lastFieldsStart = lastFields.length - fieldCount; + var fields = Array(fieldCount); + for (var x = 0; x < fieldCount; ++x) { + fields[x] = firstFields[x] || lastFields[lastFieldsStart + x] || ''; + } + if (isLastFieldIPv4Address) { + fields[fieldCount - 1] = _normalizeIPv4(fields[fieldCount - 1], protocol); + } + var allZeroFields = fields.reduce(function (acc, field, index) { + if (!field || field === "0") { + var lastLongest = acc[acc.length - 1]; + if (lastLongest && lastLongest.index + lastLongest.length === index) { + lastLongest.length++; + } else { + acc.push({ index: index, length: 1 }); + } + } + return acc; + }, []); + var longestZeroFields = allZeroFields.sort(function (a, b) { + return b.length - a.length; + })[0]; + var newHost = void 0; + if (longestZeroFields && longestZeroFields.length > 1) { + var newFirst = fields.slice(0, longestZeroFields.index); + var newLast = fields.slice(longestZeroFields.index + longestZeroFields.length); + newHost = newFirst.join(":") + "::" + newLast.join(":"); + } else { + newHost = fields.join(":"); + } + if (zone) { + newHost += "%" + zone; + } + return newHost; + } else { + return host; + } +} +var URI_PARSE = /^(?:([^:\/?#]+):)?(?:\/\/((?:([^\/?#@]*)@)?(\[[^\/?#\]]+\]|[^\/?#:]*)(?:\:(\d*))?))?([^?#]*)(?:\?([^#]*))?(?:#((?:.|\n|\r)*))?/i; +var NO_MATCH_IS_UNDEFINED = "".match(/(){0}/)[1] === undefined; +function parse(uriString) { + var options = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + var components = {}; + var protocol = options.iri !== false ? IRI_PROTOCOL : URI_PROTOCOL; + if (options.reference === "suffix") uriString = (options.scheme ? options.scheme + ":" : "") + "//" + uriString; + var matches = uriString.match(URI_PARSE); + if (matches) { + if (NO_MATCH_IS_UNDEFINED) { + //store each component + components.scheme = matches[1]; + components.userinfo = matches[3]; + components.host = matches[4]; + components.port = parseInt(matches[5], 10); + components.path = matches[6] || ""; + components.query = matches[7]; + components.fragment = matches[8]; + //fix port number + if (isNaN(components.port)) { + components.port = matches[5]; + } + } else { + //IE FIX for improper RegExp matching + //store each component + components.scheme = matches[1] || undefined; + components.userinfo = uriString.indexOf("@") !== -1 ? matches[3] : undefined; + components.host = uriString.indexOf("//") !== -1 ? matches[4] : undefined; + components.port = parseInt(matches[5], 10); + components.path = matches[6] || ""; + components.query = uriString.indexOf("?") !== -1 ? matches[7] : undefined; + components.fragment = uriString.indexOf("#") !== -1 ? matches[8] : undefined; + //fix port number + if (isNaN(components.port)) { + components.port = uriString.match(/\/\/(?:.|\n)*\:(?:\/|\?|\#|$)/) ? 
matches[4] : undefined; + } + } + if (components.host) { + //normalize IP hosts + components.host = _normalizeIPv6(_normalizeIPv4(components.host, protocol), protocol); + } + //determine reference type + if (components.scheme === undefined && components.userinfo === undefined && components.host === undefined && components.port === undefined && !components.path && components.query === undefined) { + components.reference = "same-document"; + } else if (components.scheme === undefined) { + components.reference = "relative"; + } else if (components.fragment === undefined) { + components.reference = "absolute"; + } else { + components.reference = "uri"; + } + //check for reference errors + if (options.reference && options.reference !== "suffix" && options.reference !== components.reference) { + components.error = components.error || "URI is not a " + options.reference + " reference."; + } + //find scheme handler + var schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; + //check if scheme can't handle IRIs + if (!options.unicodeSupport && (!schemeHandler || !schemeHandler.unicodeSupport)) { + //if host component is a domain name + if (components.host && (options.domainHost || schemeHandler && schemeHandler.domainHost)) { + //convert Unicode IDN -> ASCII IDN + try { + components.host = punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()); + } catch (e) { + components.error = components.error || "Host's domain name can not be converted to ASCII via punycode: " + e; + } + } + //convert IRI -> URI + _normalizeComponentEncoding(components, URI_PROTOCOL); + } else { + //normalize encodings + _normalizeComponentEncoding(components, protocol); + } + //perform scheme specific parsing + if (schemeHandler && schemeHandler.parse) { + schemeHandler.parse(components, options); + } + } else { + components.error = components.error || "URI can not be parsed."; + } + return components; +} + +function 
_recomposeAuthority(components, options) { + var protocol = options.iri !== false ? IRI_PROTOCOL : URI_PROTOCOL; + var uriTokens = []; + if (components.userinfo !== undefined) { + uriTokens.push(components.userinfo); + uriTokens.push("@"); + } + if (components.host !== undefined) { + //normalize IP hosts, add brackets and escape zone separator for IPv6 + uriTokens.push(_normalizeIPv6(_normalizeIPv4(String(components.host), protocol), protocol).replace(protocol.IPV6ADDRESS, function (_, $1, $2) { + return "[" + $1 + ($2 ? "%25" + $2 : "") + "]"; + })); + } + if (typeof components.port === "number") { + uriTokens.push(":"); + uriTokens.push(components.port.toString(10)); + } + return uriTokens.length ? uriTokens.join("") : undefined; +} + +var RDS1 = /^\.\.?\//; +var RDS2 = /^\/\.(\/|$)/; +var RDS3 = /^\/\.\.(\/|$)/; +var RDS5 = /^\/?(?:.|\n)*?(?=\/|$)/; +function removeDotSegments(input) { + var output = []; + while (input.length) { + if (input.match(RDS1)) { + input = input.replace(RDS1, ""); + } else if (input.match(RDS2)) { + input = input.replace(RDS2, "/"); + } else if (input.match(RDS3)) { + input = input.replace(RDS3, "/"); + output.pop(); + } else if (input === "." || input === "..") { + input = ""; + } else { + var im = input.match(RDS5); + if (im) { + var s = im[0]; + input = input.slice(s.length); + output.push(s); + } else { + throw new Error("Unexpected dot segment condition"); + } + } + } + return output.join(""); +} + +function serialize(components) { + var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + var protocol = options.iri ? 
IRI_PROTOCOL : URI_PROTOCOL; + var uriTokens = []; + //find scheme handler + var schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; + //perform scheme specific serialization + if (schemeHandler && schemeHandler.serialize) schemeHandler.serialize(components, options); + if (components.host) { + //if host component is an IPv6 address + if (protocol.IPV6ADDRESS.test(components.host)) {} + //TODO: normalize IPv6 address as per RFC 5952 + + //if host component is a domain name + else if (options.domainHost || schemeHandler && schemeHandler.domainHost) { + //convert IDN via punycode + try { + components.host = !options.iri ? punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()) : punycode.toUnicode(components.host); + } catch (e) { + components.error = components.error || "Host's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e; + } + } + } + //normalize encoding + _normalizeComponentEncoding(components, protocol); + if (options.reference !== "suffix" && components.scheme) { + uriTokens.push(components.scheme); + uriTokens.push(":"); + } + var authority = _recomposeAuthority(components, options); + if (authority !== undefined) { + if (options.reference !== "suffix") { + uriTokens.push("//"); + } + uriTokens.push(authority); + if (components.path && components.path.charAt(0) !== "/") { + uriTokens.push("/"); + } + } + if (components.path !== undefined) { + var s = components.path; + if (!options.absolutePath && (!schemeHandler || !schemeHandler.absolutePath)) { + s = removeDotSegments(s); + } + if (authority === undefined) { + s = s.replace(/^\/\//, "/%2F"); //don't allow the path to start with "//" + } + uriTokens.push(s); + } + if (components.query !== undefined) { + uriTokens.push("?"); + uriTokens.push(components.query); + } + if (components.fragment !== undefined) { + uriTokens.push("#"); + uriTokens.push(components.fragment); + } + return 
uriTokens.join(""); //merge tokens into a string +} + +function resolveComponents(base, relative) { + var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; + var skipNormalization = arguments[3]; + + var target = {}; + if (!skipNormalization) { + base = parse(serialize(base, options), options); //normalize base components + relative = parse(serialize(relative, options), options); //normalize relative components + } + options = options || {}; + if (!options.tolerant && relative.scheme) { + target.scheme = relative.scheme; + //target.authority = relative.authority; + target.userinfo = relative.userinfo; + target.host = relative.host; + target.port = relative.port; + target.path = removeDotSegments(relative.path || ""); + target.query = relative.query; + } else { + if (relative.userinfo !== undefined || relative.host !== undefined || relative.port !== undefined) { + //target.authority = relative.authority; + target.userinfo = relative.userinfo; + target.host = relative.host; + target.port = relative.port; + target.path = removeDotSegments(relative.path || ""); + target.query = relative.query; + } else { + if (!relative.path) { + target.path = base.path; + if (relative.query !== undefined) { + target.query = relative.query; + } else { + target.query = base.query; + } + } else { + if (relative.path.charAt(0) === "/") { + target.path = removeDotSegments(relative.path); + } else { + if ((base.userinfo !== undefined || base.host !== undefined || base.port !== undefined) && !base.path) { + target.path = "/" + relative.path; + } else if (!base.path) { + target.path = relative.path; + } else { + target.path = base.path.slice(0, base.path.lastIndexOf("/") + 1) + relative.path; + } + target.path = removeDotSegments(target.path); + } + target.query = relative.query; + } + //target.authority = base.authority; + target.userinfo = base.userinfo; + target.host = base.host; + target.port = base.port; + } + target.scheme = base.scheme; + } + 
target.fragment = relative.fragment; + return target; +} + +function resolve(baseURI, relativeURI, options) { + var schemelessOptions = assign({ scheme: 'null' }, options); + return serialize(resolveComponents(parse(baseURI, schemelessOptions), parse(relativeURI, schemelessOptions), schemelessOptions, true), schemelessOptions); +} + +function normalize(uri, options) { + if (typeof uri === "string") { + uri = serialize(parse(uri, options), options); + } else if (typeOf(uri) === "object") { + uri = parse(serialize(uri, options), options); + } + return uri; +} + +function equal(uriA, uriB, options) { + if (typeof uriA === "string") { + uriA = serialize(parse(uriA, options), options); + } else if (typeOf(uriA) === "object") { + uriA = serialize(uriA, options); + } + if (typeof uriB === "string") { + uriB = serialize(parse(uriB, options), options); + } else if (typeOf(uriB) === "object") { + uriB = serialize(uriB, options); + } + return uriA === uriB; +} + +function escapeComponent(str, options) { + return str && str.toString().replace(!options || !options.iri ? URI_PROTOCOL.ESCAPE : IRI_PROTOCOL.ESCAPE, pctEncChar); +} + +function unescapeComponent(str, options) { + return str && str.toString().replace(!options || !options.iri ? URI_PROTOCOL.PCT_ENCODED : IRI_PROTOCOL.PCT_ENCODED, pctDecChars); +} + +var handler = { + scheme: "http", + domainHost: true, + parse: function parse(components, options) { + //report missing host + if (!components.host) { + components.error = components.error || "HTTP URIs must have a host."; + } + return components; + }, + serialize: function serialize(components, options) { + //normalize the default port + if (components.port === (String(components.scheme).toLowerCase() !== "https" ? 
80 : 443) || components.port === "") { + components.port = undefined; + } + //normalize the empty path + if (!components.path) { + components.path = "/"; + } + //NOTE: We do not parse query strings for HTTP URIs + //as WWW Form Url Encoded query strings are part of the HTML4+ spec, + //and not the HTTP spec. + return components; + } +}; + +var handler$1 = { + scheme: "https", + domainHost: handler.domainHost, + parse: handler.parse, + serialize: handler.serialize +}; + +var O = {}; +var isIRI = true; +//RFC 3986 +var UNRESERVED$$ = "[A-Za-z0-9\\-\\.\\_\\~" + (isIRI ? "\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF" : "") + "]"; +var HEXDIG$$ = "[0-9A-Fa-f]"; //case-insensitive +var PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)); //expanded +//RFC 5322, except these symbols as per RFC 6068: @ : / ? # [ ] & ; = +//const ATEXT$$ = "[A-Za-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\-\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~]"; +//const WSP$$ = "[\\x20\\x09]"; +//const OBS_QTEXT$$ = "[\\x01-\\x08\\x0B\\x0C\\x0E-\\x1F\\x7F]"; //(%d1-8 / %d11-12 / %d14-31 / %d127) +//const QTEXT$$ = merge("[\\x21\\x23-\\x5B\\x5D-\\x7E]", OBS_QTEXT$$); //%d33 / %d35-91 / %d93-126 / obs-qtext +//const VCHAR$$ = "[\\x21-\\x7E]"; +//const WSP$$ = "[\\x20\\x09]"; +//const OBS_QP$ = subexp("\\\\" + merge("[\\x00\\x0D\\x0A]", OBS_QTEXT$$)); //%d0 / CR / LF / obs-qtext +//const FWS$ = subexp(subexp(WSP$$ + "*" + "\\x0D\\x0A") + "?" + WSP$$ + "+"); +//const QUOTED_PAIR$ = subexp(subexp("\\\\" + subexp(VCHAR$$ + "|" + WSP$$)) + "|" + OBS_QP$); +//const QUOTED_STRING$ = subexp('\\"' + subexp(FWS$ + "?" + QCONTENT$) + "*" + FWS$ + "?" 
+ '\\"'); +var ATEXT$$ = "[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]"; +var QTEXT$$ = "[\\!\\$\\%\\'\\(\\)\\*\\+\\,\\-\\.0-9\\<\\>A-Z\\x5E-\\x7E]"; +var VCHAR$$ = merge(QTEXT$$, "[\\\"\\\\]"); +var SOME_DELIMS$$ = "[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]"; +var UNRESERVED = new RegExp(UNRESERVED$$, "g"); +var PCT_ENCODED = new RegExp(PCT_ENCODED$, "g"); +var NOT_LOCAL_PART = new RegExp(merge("[^]", ATEXT$$, "[\\.]", '[\\"]', VCHAR$$), "g"); +var NOT_HFNAME = new RegExp(merge("[^]", UNRESERVED$$, SOME_DELIMS$$), "g"); +var NOT_HFVALUE = NOT_HFNAME; +function decodeUnreserved(str) { + var decStr = pctDecChars(str); + return !decStr.match(UNRESERVED) ? str : decStr; +} +var handler$2 = { + scheme: "mailto", + parse: function parse$$1(components, options) { + var mailtoComponents = components; + var to = mailtoComponents.to = mailtoComponents.path ? mailtoComponents.path.split(",") : []; + mailtoComponents.path = undefined; + if (mailtoComponents.query) { + var unknownHeaders = false; + var headers = {}; + var hfields = mailtoComponents.query.split("&"); + for (var x = 0, xl = hfields.length; x < xl; ++x) { + var hfield = hfields[x].split("="); + switch (hfield[0]) { + case "to": + var toAddrs = hfield[1].split(","); + for (var _x = 0, _xl = toAddrs.length; _x < _xl; ++_x) { + to.push(toAddrs[_x]); + } + break; + case "subject": + mailtoComponents.subject = unescapeComponent(hfield[1], options); + break; + case "body": + mailtoComponents.body = unescapeComponent(hfield[1], options); + break; + default: + unknownHeaders = true; + headers[unescapeComponent(hfield[0], options)] = unescapeComponent(hfield[1], options); + break; + } + } + if (unknownHeaders) mailtoComponents.headers = headers; + } + mailtoComponents.query = undefined; + for (var _x2 = 0, _xl2 = to.length; _x2 < _xl2; ++_x2) { + var addr = to[_x2].split("@"); + addr[0] = unescapeComponent(addr[0]); + if (!options.unicodeSupport) { + //convert Unicode IDN -> ASCII IDN + try { + addr[1] = 
punycode.toASCII(unescapeComponent(addr[1], options).toLowerCase()); + } catch (e) { + mailtoComponents.error = mailtoComponents.error || "Email address's domain name can not be converted to ASCII via punycode: " + e; + } + } else { + addr[1] = unescapeComponent(addr[1], options).toLowerCase(); + } + to[_x2] = addr.join("@"); + } + return mailtoComponents; + }, + serialize: function serialize$$1(mailtoComponents, options) { + var components = mailtoComponents; + var to = toArray(mailtoComponents.to); + if (to) { + for (var x = 0, xl = to.length; x < xl; ++x) { + var toAddr = String(to[x]); + var atIdx = toAddr.lastIndexOf("@"); + var localPart = toAddr.slice(0, atIdx).replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_LOCAL_PART, pctEncChar); + var domain = toAddr.slice(atIdx + 1); + //convert IDN via punycode + try { + domain = !options.iri ? punycode.toASCII(unescapeComponent(domain, options).toLowerCase()) : punycode.toUnicode(domain); + } catch (e) { + components.error = components.error || "Email address's domain name can not be converted to " + (!options.iri ? 
"ASCII" : "Unicode") + " via punycode: " + e; + } + to[x] = localPart + "@" + domain; + } + components.path = to.join(","); + } + var headers = mailtoComponents.headers = mailtoComponents.headers || {}; + if (mailtoComponents.subject) headers["subject"] = mailtoComponents.subject; + if (mailtoComponents.body) headers["body"] = mailtoComponents.body; + var fields = []; + for (var name in headers) { + if (headers[name] !== O[name]) { + fields.push(name.replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFNAME, pctEncChar) + "=" + headers[name].replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFVALUE, pctEncChar)); + } + } + if (fields.length) { + components.query = fields.join("&"); + } + return components; + } +}; + +var URN_PARSE = /^([^\:]+)\:(.*)/; +//RFC 2141 +var handler$3 = { + scheme: "urn", + parse: function parse$$1(components, options) { + var matches = components.path && components.path.match(URN_PARSE); + var urnComponents = components; + if (matches) { + var scheme = options.scheme || urnComponents.scheme || "urn"; + var nid = matches[1].toLowerCase(); + var nss = matches[2]; + var urnScheme = scheme + ":" + (options.nid || nid); + var schemeHandler = SCHEMES[urnScheme]; + urnComponents.nid = nid; + urnComponents.nss = nss; + urnComponents.path = undefined; + if (schemeHandler) { + urnComponents = schemeHandler.parse(urnComponents, options); + } + } else { + urnComponents.error = urnComponents.error || "URN can not be parsed."; + } + return urnComponents; + }, + serialize: function serialize$$1(urnComponents, options) { + var scheme = options.scheme || urnComponents.scheme || "urn"; + var nid = urnComponents.nid; + var urnScheme = scheme + ":" + (options.nid || nid); + var schemeHandler = SCHEMES[urnScheme]; + if (schemeHandler) { + urnComponents = schemeHandler.serialize(urnComponents, options); + } + var uriComponents = urnComponents; + var nss = urnComponents.nss; + 
uriComponents.path = (nid || options.nid) + ":" + nss; + return uriComponents; + } +}; + +var UUID = /^[0-9A-Fa-f]{8}(?:\-[0-9A-Fa-f]{4}){3}\-[0-9A-Fa-f]{12}$/; +//RFC 4122 +var handler$4 = { + scheme: "urn:uuid", + parse: function parse(urnComponents, options) { + var uuidComponents = urnComponents; + uuidComponents.uuid = uuidComponents.nss; + uuidComponents.nss = undefined; + if (!options.tolerant && (!uuidComponents.uuid || !uuidComponents.uuid.match(UUID))) { + uuidComponents.error = uuidComponents.error || "UUID is not valid."; + } + return uuidComponents; + }, + serialize: function serialize(uuidComponents, options) { + var urnComponents = uuidComponents; + //normalize UUID + urnComponents.nss = (uuidComponents.uuid || "").toLowerCase(); + return urnComponents; + } +}; + +SCHEMES[handler.scheme] = handler; +SCHEMES[handler$1.scheme] = handler$1; +SCHEMES[handler$2.scheme] = handler$2; +SCHEMES[handler$3.scheme] = handler$3; +SCHEMES[handler$4.scheme] = handler$4; + +exports.SCHEMES = SCHEMES; +exports.pctEncChar = pctEncChar; +exports.pctDecChars = pctDecChars; +exports.parse = parse; +exports.removeDotSegments = removeDotSegments; +exports.serialize = serialize; +exports.resolveComponents = resolveComponents; +exports.resolve = resolve; +exports.normalize = normalize; +exports.equal = equal; +exports.escapeComponent = escapeComponent; +exports.unescapeComponent = unescapeComponent; + +Object.defineProperty(exports, '__esModule', { value: true }); + +}))); + + +},{}],"ajv":[function(require,module,exports){ +'use strict'; + +var compileSchema = require('./compile') + , resolve = require('./compile/resolve') + , Cache = require('./cache') + , SchemaObject = require('./compile/schema_obj') + , stableStringify = require('fast-json-stable-stringify') + , formats = require('./compile/formats') + , rules = require('./compile/rules') + , $dataMetaSchema = require('./data') + , util = require('./compile/util'); + +module.exports = Ajv; + +Ajv.prototype.validate = 
validate; +Ajv.prototype.compile = compile; +Ajv.prototype.addSchema = addSchema; +Ajv.prototype.addMetaSchema = addMetaSchema; +Ajv.prototype.validateSchema = validateSchema; +Ajv.prototype.getSchema = getSchema; +Ajv.prototype.removeSchema = removeSchema; +Ajv.prototype.addFormat = addFormat; +Ajv.prototype.errorsText = errorsText; + +Ajv.prototype._addSchema = _addSchema; +Ajv.prototype._compile = _compile; + +Ajv.prototype.compileAsync = require('./compile/async'); +var customKeyword = require('./keyword'); +Ajv.prototype.addKeyword = customKeyword.add; +Ajv.prototype.getKeyword = customKeyword.get; +Ajv.prototype.removeKeyword = customKeyword.remove; +Ajv.prototype.validateKeyword = customKeyword.validate; + +var errorClasses = require('./compile/error_classes'); +Ajv.ValidationError = errorClasses.Validation; +Ajv.MissingRefError = errorClasses.MissingRef; +Ajv.$dataMetaSchema = $dataMetaSchema; + +var META_SCHEMA_ID = 'http://json-schema.org/draft-07/schema'; + +var META_IGNORE_OPTIONS = [ 'removeAdditional', 'useDefaults', 'coerceTypes', 'strictDefaults' ]; +var META_SUPPORT_DATA = ['/properties']; + +/** + * Creates validator instance. 
+ * Usage: `Ajv(opts)` + * @param {Object} opts optional options + * @return {Object} ajv instance + */ +function Ajv(opts) { + if (!(this instanceof Ajv)) return new Ajv(opts); + opts = this._opts = util.copy(opts) || {}; + setLogger(this); + this._schemas = {}; + this._refs = {}; + this._fragments = {}; + this._formats = formats(opts.format); + + this._cache = opts.cache || new Cache; + this._loadingSchemas = {}; + this._compilations = []; + this.RULES = rules(); + this._getId = chooseGetId(opts); + + opts.loopRequired = opts.loopRequired || Infinity; + if (opts.errorDataPath == 'property') opts._errorDataPathProperty = true; + if (opts.serialize === undefined) opts.serialize = stableStringify; + this._metaOpts = getMetaSchemaOptions(this); + + if (opts.formats) addInitialFormats(this); + addDefaultMetaSchema(this); + if (typeof opts.meta == 'object') this.addMetaSchema(opts.meta); + if (opts.nullable) this.addKeyword('nullable', {metaSchema: {type: 'boolean'}}); + addInitialSchemas(this); +} + + + +/** + * Validate data using schema + * Schema will be compiled and cached (using serialized JSON as key. [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) is used to serialize. + * @this Ajv + * @param {String|Object} schemaKeyRef key, ref or schema object + * @param {Any} data to be validated + * @return {Boolean} validation result. Errors from the last validation will be available in `ajv.errors` (and also in compiled schema: `schema.errors`). + */ +function validate(schemaKeyRef, data) { + var v; + if (typeof schemaKeyRef == 'string') { + v = this.getSchema(schemaKeyRef); + if (!v) throw new Error('no schema with key or ref "' + schemaKeyRef + '"'); + } else { + var schemaObj = this._addSchema(schemaKeyRef); + v = schemaObj.validate || this._compile(schemaObj); + } + + var valid = v(data); + if (v.$async !== true) this.errors = v.errors; + return valid; +} + + +/** + * Create validating function for passed schema. 
+ * @this Ajv + * @param {Object} schema schema object + * @param {Boolean} _meta true if schema is a meta-schema. Used internally to compile meta schemas of custom keywords. + * @return {Function} validating function + */ +function compile(schema, _meta) { + var schemaObj = this._addSchema(schema, undefined, _meta); + return schemaObj.validate || this._compile(schemaObj); +} + + +/** + * Adds schema to the instance. + * @this Ajv + * @param {Object|Array} schema schema or array of schemas. If array is passed, `key` and other parameters will be ignored. + * @param {String} key Optional schema key. Can be passed to `validate` method instead of schema object or id/ref. One schema per instance can have empty `id` and `key`. + * @param {Boolean} _skipValidation true to skip schema validation. Used internally, option validateSchema should be used instead. + * @param {Boolean} _meta true if schema is a meta-schema. Used internally, addMetaSchema should be used instead. + * @return {Ajv} this for method chaining + */ +function addSchema(schema, key, _skipValidation, _meta) { + if (Array.isArray(schema)){ + for (var i=0; i} errors optional array of validation errors, if not passed errors from the instance are used. + * @param {Object} options optional options with properties `separator` and `dataVar`. + * @return {String} human readable string with all errors descriptions + */ +function errorsText(errors, options) { + errors = errors || this.errors; + if (!errors) return 'No errors'; + options = options || {}; + var separator = options.separator === undefined ? ', ' : options.separator; + var dataVar = options.dataVar === undefined ? 
'data' : options.dataVar; + + var text = ''; + for (var i=0; i%\\^`{|}]|%[0-9a-f]{2})|\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?)*\})*$/i,c=/^(?:(?:http[s\u017F]?|ftp):\/\/)(?:(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+(?::(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?@)?(?:(?!10(?:\.[0-9]{1,3}){3})(?!127(?:\.[0-9]{1,3}){3})(?!169\.254(?:\.[0-9]{1,3}){2})(?!192\.168(?:\.[0-9]{1,3}){2})(?!172\.(?:1[6-9]|2[0-9]|3[01])(?:\.[0-9]{1,3}){2})(?:[1-9][0-9]?|1[0-9][0-9]|2[01][0-9]|22[0-3])(?:\.(?:1?[0-9]{1,2}|2[0-4][0-9]|25[0-5])){2}(?:\.(?:[1-9][0-9]?|1[0-9][0-9]|2[0-4][0-9]|25[0-4]))|(?:(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)(?:\.(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)*(?:\.(?:(?:[KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]){2,})))(?::[0-9]{2,5})?(?:\/(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?$/i,h=/^(?:urn:uuid:)?[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-
f]{12}$/i,d=/^(?:\/(?:[^~/]|~0|~1)*)*$/,f=/^#(?:\/(?:[a-z0-9_\-.!$&'()*+,;:=@]|%[0-9a-f]{2}|~0|~1)*)*$/i,p=/^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)$/;function m(e){return a.copy(m[e="full"==e?"full":"fast"])}function v(e){var r=e.match(o);if(!r)return!1;var t,a=+r[2],s=+r[3];return 1<=a&&a<=12&&1<=s&&s<=(2!=a||((t=+r[1])%4!=0||t%100==0&&t%400!=0)?i[a]:29)}function y(e,r){var t=e.match(n);if(!t)return!1;var a=t[1],s=t[2],o=t[3];return(a<=23&&s<=59&&o<=59||23==a&&59==s&&60==o)&&(!r||t[5])}(r.exports=m).fast={date:/^\d\d\d\d-[0-1]\d-[0-3]\d$/,time:/^(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d:\d\d)?$/i,"date-time":/^\d\d\d\d-[0-1]\d-[0-3]\d[t\s](?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d:\d\d)$/i,uri:/^(?:[a-z][a-z0-9+-.]*:)(?:\/?\/)?[^\s]*$/i,"uri-reference":/^(?:(?:[a-z][a-z0-9+-.]*:)?\/?\/)?(?:[^\\\s#][^\s#]*)?(?:#[^\\\s]*)?$/i,"uri-template":u,url:c,email:/^[a-z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?)*$/i,hostname:s,ipv4:/^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/,ipv6:/^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{
1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i,regex:w,uuid:h,"json-pointer":d,"json-pointer-uri-fragment":f,"relative-json-pointer":p},m.full={date:v,time:y,"date-time":function(e){var r=e.split(g);return 2==r.length&&v(r[0])&&y(r[1],!0)},uri:function(e){return P.test(e)&&l.test(e)},"uri-reference":/^(?:[a-z][a-z0-9+\-.]*:)?(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'"()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?(?:\?(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i,"uri-template":u,url:c,email:/^[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$/i,hostname:function(e){return 
e.length<=255&&s.test(e)},ipv4:/^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/,ipv6:/^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i,regex:w,uuid:h,"json-pointer":d,"json-pointer-uri-fragment":f,"relative-json-pointer":p};var g=/t|\s/i;var P=/\/|:/;var E=/[^\\]\\Z/;function w(e){if(E.test(e))return!1;try{return new RegExp(e),!0}catch(e){return!1}}},{"./util":10}],5:[function(e,r,t){"use strict";var $=e("./resolve"),D=e("./util"),j=e("./error_classes"),O=e("fast-json-stable-stringify"),I=e("../dotjs/validate"),A=D.ucs2length,C=e("fast-deep-equal"),k=j.Validation;function L(e,r,t){var a=s.call(this,e,r,t);return 0<=a?{index:a,compiling:!0}:{index:a=this._compilations.length,compiling:!(this._compilations[a]={schema:e,root:r,baseId:t})}}function z(e,r,t){var a=s.call(this,e,r,t);0<=a&&this._compilations.splice(a,1)}function 
s(e,r,t){for(var a=0;a",g=f?">":"<",P=void 0;if(v){var E=e.util.getData(m.$data,i,e.dataPathArr),w="exclusive"+o,S="exclType"+o,b="exclIsNumber"+o,_="' + "+(R="op"+o)+" + '";s+=" var schemaExcl"+o+" = "+E+"; ";var F;P=p;(F=F||[]).push(s+=" var "+w+"; var "+S+" = typeof "+(E="schemaExcl"+o)+"; if ("+S+" != 'boolean' && "+S+" != 'undefined' && "+S+" != 'number') { "),s="",!1!==e.createErrors?(s+=" { keyword: '"+(P||"_exclusiveLimit")+"' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: {} ",!1!==e.opts.messages&&(s+=" , message: '"+p+" should be boolean' "),e.opts.verbose&&(s+=" , schema: validate.schema"+l+" , parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";var x=s;s=F.pop(),s+=!e.compositeRule&&c?e.async?" throw new ValidationError(["+x+"]); ":" validate.errors = ["+x+"]; return false; ":" var err = "+x+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+=" } else if ( ",d&&(s+=" ("+a+" !== undefined && typeof "+a+" != 'number') || "),s+=" "+S+" == 'number' ? ( ("+w+" = "+a+" === undefined || "+E+" "+y+"= "+a+") ? "+h+" "+g+"= "+E+" : "+h+" "+g+" "+a+" ) : ( ("+w+" = "+E+" === true) ? "+h+" "+g+"= "+a+" : "+h+" "+g+" "+a+" ) || "+h+" !== "+h+") { var op"+o+" = "+w+" ? '"+y+"' : '"+y+"='; ",void 0===n&&(u=e.errSchemaPath+"/"+(P=p),a=E,d=v)}else{_=y;if((b="number"==typeof m)&&d){var R="'"+_+"'";s+=" if ( ",d&&(s+=" ("+a+" !== undefined && typeof "+a+" != 'number') || "),s+=" ( "+a+" === undefined || "+m+" "+y+"= "+a+" ? 
"+h+" "+g+"= "+m+" : "+h+" "+g+" "+a+" ) || "+h+" !== "+h+") { "}else{b&&void 0===n?(w=!0,u=e.errSchemaPath+"/"+(P=p),a=m,g+="="):(b&&(a=Math[f?"min":"max"](m,n)),m===(!b||a)?(w=!0,u=e.errSchemaPath+"/"+(P=p),g+="="):(w=!1,_+="="));R="'"+_+"'";s+=" if ( ",d&&(s+=" ("+a+" !== undefined && typeof "+a+" != 'number') || "),s+=" "+h+" "+g+" "+a+" || "+h+" !== "+h+") { "}}P=P||r,(F=F||[]).push(s),s="",!1!==e.createErrors?(s+=" { keyword: '"+(P||"_limit")+"' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: { comparison: "+R+", limit: "+a+", exclusive: "+w+" } ",!1!==e.opts.messages&&(s+=" , message: 'should be "+_+" ",s+=d?"' + "+a:a+"'"),e.opts.verbose&&(s+=" , schema: ",s+=d?"validate.schema"+l:""+n,s+=" , parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";x=s;return s=F.pop(),s+=!e.compositeRule&&c?e.async?" throw new ValidationError(["+x+"]); ":" validate.errors = ["+x+"]; return false; ":" var err = "+x+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+=" } ",c&&(s+=" else { "),s}},{}],13:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a,s=" ",o=e.level,i=e.dataLevel,n=e.schema[r],l=e.schemaPath+e.util.getProperty(r),u=e.errSchemaPath+"/"+r,c=!e.opts.allErrors,h="data"+(i||""),d=e.opts.$data&&n&&n.$data;a=d?(s+=" var schema"+o+" = "+e.util.getData(n.$data,i,e.dataPathArr)+"; ","schema"+o):n,s+="if ( ",d&&(s+=" ("+a+" !== undefined && typeof "+a+" != 'number') || ");var f=r,p=p||[];p.push(s+=" "+h+".length "+("maxItems"==r?">":"<")+" "+a+") { "),s="",!1!==e.createErrors?(s+=" { keyword: '"+(f||"_limitItems")+"' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: { limit: "+a+" } ",!1!==e.opts.messages&&(s+=" , message: 'should NOT have ",s+="maxItems"==r?"more":"fewer",s+=" than ",s+=d?"' + "+a+" + '":""+n,s+=" items' "),e.opts.verbose&&(s+=" , schema: ",s+=d?"validate.schema"+l:""+n,s+=" , 
parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";var m=s;return s=p.pop(),s+=!e.compositeRule&&c?e.async?" throw new ValidationError(["+m+"]); ":" validate.errors = ["+m+"]; return false; ":" var err = "+m+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+="} ",c&&(s+=" else { "),s}},{}],14:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a,s=" ",o=e.level,i=e.dataLevel,n=e.schema[r],l=e.schemaPath+e.util.getProperty(r),u=e.errSchemaPath+"/"+r,c=!e.opts.allErrors,h="data"+(i||""),d=e.opts.$data&&n&&n.$data;a=d?(s+=" var schema"+o+" = "+e.util.getData(n.$data,i,e.dataPathArr)+"; ","schema"+o):n,s+="if ( ",d&&(s+=" ("+a+" !== undefined && typeof "+a+" != 'number') || "),s+=!1===e.opts.unicode?" "+h+".length ":" ucs2length("+h+") ";var f=r,p=p||[];p.push(s+=" "+("maxLength"==r?">":"<")+" "+a+") { "),s="",!1!==e.createErrors?(s+=" { keyword: '"+(f||"_limitLength")+"' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: { limit: "+a+" } ",!1!==e.opts.messages&&(s+=" , message: 'should NOT be ",s+="maxLength"==r?"longer":"shorter",s+=" than ",s+=d?"' + "+a+" + '":""+n,s+=" characters' "),e.opts.verbose&&(s+=" , schema: ",s+=d?"validate.schema"+l:""+n,s+=" , parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";var m=s;return s=p.pop(),s+=!e.compositeRule&&c?e.async?" 
throw new ValidationError(["+m+"]); ":" validate.errors = ["+m+"]; return false; ":" var err = "+m+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+="} ",c&&(s+=" else { "),s}},{}],15:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a,s=" ",o=e.level,i=e.dataLevel,n=e.schema[r],l=e.schemaPath+e.util.getProperty(r),u=e.errSchemaPath+"/"+r,c=!e.opts.allErrors,h="data"+(i||""),d=e.opts.$data&&n&&n.$data;a=d?(s+=" var schema"+o+" = "+e.util.getData(n.$data,i,e.dataPathArr)+"; ","schema"+o):n,s+="if ( ",d&&(s+=" ("+a+" !== undefined && typeof "+a+" != 'number') || ");var f=r,p=p||[];p.push(s+=" Object.keys("+h+").length "+("maxProperties"==r?">":"<")+" "+a+") { "),s="",!1!==e.createErrors?(s+=" { keyword: '"+(f||"_limitProperties")+"' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: { limit: "+a+" } ",!1!==e.opts.messages&&(s+=" , message: 'should NOT have ",s+="maxProperties"==r?"more":"fewer",s+=" than ",s+=d?"' + "+a+" + '":""+n,s+=" properties' "),e.opts.verbose&&(s+=" , schema: ",s+=d?"validate.schema"+l:""+n,s+=" , parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";var m=s;return s=p.pop(),s+=!e.compositeRule&&c?e.async?" 
throw new ValidationError(["+m+"]); ":" validate.errors = ["+m+"]; return false; ":" var err = "+m+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+="} ",c&&(s+=" else { "),s}},{}],16:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a=" ",s=e.schema[r],o=e.schemaPath+e.util.getProperty(r),i=e.errSchemaPath+"/"+r,n=!e.opts.allErrors,l=e.util.copy(e),u="";l.level++;var c="valid"+l.level,h=l.baseId,d=!0,f=s;if(f)for(var p,m=-1,v=f.length-1;m "+x+") { ";var $=c+"["+x+"]";f.schema=F,f.schemaPath=n+"["+x+"]",f.errSchemaPath=l+"/"+x,f.errorPath=e.util.getPathExpr(e.errorPath,x,e.opts.jsonPointers,!0),f.dataPathArr[y]=x;var D=e.validate(f);f.baseId=P,e.util.varOccurences(D,g)<2?a+=" "+e.util.varReplace(D,g,$)+" ":a+=" var "+g+" = "+$+"; "+D+" ",a+=" } ",u&&(a+=" if ("+m+") { ",p+="}")}if("object"==typeof E&&e.util.schemaHasRules(E,e.RULES.all)){f.schema=E,f.schemaPath=e.schemaPath+".additionalItems",f.errSchemaPath=e.errSchemaPath+"/additionalItems",a+=" "+m+" = true; if ("+c+".length > "+i.length+") { for (var "+v+" = "+i.length+"; "+v+" < "+c+".length; "+v+"++) { ",f.errorPath=e.util.getPathExpr(e.errorPath,v,e.opts.jsonPointers,!0);$=c+"["+v+"]";f.dataPathArr[y]=v;D=e.validate(f);f.baseId=P,e.util.varOccurences(D,g)<2?a+=" "+e.util.varReplace(D,g,$)+" ":a+=" var "+g+" = "+$+"; "+D+" ",u&&(a+=" if (!"+m+") break; "),a+=" } } ",u&&(a+=" if ("+m+") { ",p+="}")}}else if(e.util.schemaHasRules(i,e.RULES.all)){f.schema=i,f.schemaPath=n,f.errSchemaPath=l,a+=" for (var "+v+" = 0; "+v+" < "+c+".length; "+v+"++) { ",f.errorPath=e.util.getPathExpr(e.errorPath,v,e.opts.jsonPointers,!0);$=c+"["+v+"]";f.dataPathArr[y]=v;D=e.validate(f);f.baseId=P,e.util.varOccurences(D,g)<2?a+=" "+e.util.varReplace(D,g,$)+" ":a+=" var "+g+" = "+$+"; "+D+" ",u&&(a+=" if (!"+m+") break; "),a+=" }"}return u&&(a+=" "+p+" if ("+d+" == errors) {"),a=e.util.cleanUpCode(a)}},{}],28:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a,s=" 
",o=e.level,i=e.dataLevel,n=e.schema[r],l=e.schemaPath+e.util.getProperty(r),u=e.errSchemaPath+"/"+r,c=!e.opts.allErrors,h="data"+(i||""),d=e.opts.$data&&n&&n.$data;a=d?(s+=" var schema"+o+" = "+e.util.getData(n.$data,i,e.dataPathArr)+"; ","schema"+o):n,s+="var division"+o+";if (",d&&(s+=" "+a+" !== undefined && ( typeof "+a+" != 'number' || "),s+=" (division"+o+" = "+h+" / "+a+", ",s+=e.opts.multipleOfPrecision?" Math.abs(Math.round(division"+o+") - division"+o+") > 1e-"+e.opts.multipleOfPrecision+" ":" division"+o+" !== parseInt(division"+o+") ",s+=" ) ",d&&(s+=" ) ");var f=f||[];f.push(s+=" ) { "),s="",!1!==e.createErrors?(s+=" { keyword: 'multipleOf' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: { multipleOf: "+a+" } ",!1!==e.opts.messages&&(s+=" , message: 'should be multiple of ",s+=d?"' + "+a:a+"'"),e.opts.verbose&&(s+=" , schema: ",s+=d?"validate.schema"+l:""+n,s+=" , parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";var p=s;return s=f.pop(),s+=!e.compositeRule&&c?e.async?" 
throw new ValidationError(["+p+"]); ":" validate.errors = ["+p+"]; return false; ":" var err = "+p+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+="} ",c&&(s+=" else { "),s}},{}],29:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a=" ",s=e.level,o=e.dataLevel,i=e.schema[r],n=e.schemaPath+e.util.getProperty(r),l=e.errSchemaPath+"/"+r,u=!e.opts.allErrors,c="data"+(o||""),h="errs__"+s,d=e.util.copy(e);d.level++;var f="valid"+d.level;if(e.util.schemaHasRules(i,e.RULES.all)){d.schema=i,d.schemaPath=n,d.errSchemaPath=l,a+=" var "+h+" = errors; ";var p,m=e.compositeRule;e.compositeRule=d.compositeRule=!0,d.createErrors=!1,d.opts.allErrors&&(p=d.opts.allErrors,d.opts.allErrors=!1),a+=" "+e.validate(d)+" ",d.createErrors=!0,p&&(d.opts.allErrors=p),e.compositeRule=d.compositeRule=m;var v=v||[];v.push(a+=" if ("+f+") { "),a="",!1!==e.createErrors?(a+=" { keyword: 'not' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(l)+" , params: {} ",!1!==e.opts.messages&&(a+=" , message: 'should NOT be valid' "),e.opts.verbose&&(a+=" , schema: validate.schema"+n+" , parentSchema: validate.schema"+e.schemaPath+" , data: "+c+" "),a+=" } "):a+=" {} ";var y=a;a=v.pop(),a+=!e.compositeRule&&u?e.async?" 
throw new ValidationError(["+y+"]); ":" validate.errors = ["+y+"]; return false; ":" var err = "+y+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",a+=" } else { errors = "+h+"; if (vErrors !== null) { if ("+h+") vErrors.length = "+h+"; else vErrors = null; } ",e.opts.allErrors&&(a+=" } ")}else a+=" var err = ",!1!==e.createErrors?(a+=" { keyword: 'not' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(l)+" , params: {} ",!1!==e.opts.messages&&(a+=" , message: 'should NOT be valid' "),e.opts.verbose&&(a+=" , schema: validate.schema"+n+" , parentSchema: validate.schema"+e.schemaPath+" , data: "+c+" "),a+=" } "):a+=" {} ",a+="; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",u&&(a+=" if (false) { ");return a}},{}],30:[function(e,r,t){"use strict";r.exports=function(e,r,t){var a=" ",s=e.level,o=e.dataLevel,i=e.schema[r],n=e.schemaPath+e.util.getProperty(r),l=e.errSchemaPath+"/"+r,u=!e.opts.allErrors,c="data"+(o||""),h="valid"+s,d="errs__"+s,f=e.util.copy(e),p="";f.level++;var m="valid"+f.level,v=f.baseId,y="prevValid"+s,g="passingSchemas"+s;a+="var "+d+" = errors , "+y+" = false , "+h+" = false , "+g+" = null; ";var P=e.compositeRule;e.compositeRule=f.compositeRule=!0;var E=i;if(E)for(var w,S=-1,b=E.length-1;S 1) { ";var p=e.schema.items&&e.schema.items.type,m=Array.isArray(p);if(!p||"object"==p||"array"==p||m&&(0<=p.indexOf("object")||0<=p.indexOf("array")))s+=" outer: for (;i--;) { for (j = i; j--;) { if (equal("+h+"[i], "+h+"[j])) { "+d+" = false; break outer; } } } ";else s+=" var itemIndices = {}, item; for (;i--;) { var item = "+h+"[i]; ",s+=" if ("+e.util["checkDataType"+(m?"s":"")](p,"item",!0)+") continue; ",m&&(s+=" if (typeof item == 'string') item = '\"' + item; "),s+=" if (typeof itemIndices[item] == 'number') { "+d+" = false; j = itemIndices[item]; break; } itemIndices[item] = i; } ";s+=" } ",f&&(s+=" } ");var v=v||[];v.push(s+=" if (!"+d+") { 
"),s="",!1!==e.createErrors?(s+=" { keyword: 'uniqueItems' , dataPath: (dataPath || '') + "+e.errorPath+" , schemaPath: "+e.util.toQuotedString(u)+" , params: { i: i, j: j } ",!1!==e.opts.messages&&(s+=" , message: 'should NOT have duplicate items (items ## ' + j + ' and ' + i + ' are identical)' "),e.opts.verbose&&(s+=" , schema: ",s+=f?"validate.schema"+l:""+n,s+=" , parentSchema: validate.schema"+e.schemaPath+" , data: "+h+" "),s+=" } "):s+=" {} ";var y=s;s=v.pop(),s+=!e.compositeRule&&c?e.async?" throw new ValidationError(["+y+"]); ":" validate.errors = ["+y+"]; return false; ":" var err = "+y+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ",s+=" } ",c&&(s+=" else { ")}else c&&(s+=" if (true) { ");return s}},{}],37:[function(e,r,t){"use strict";r.exports=function(a,e,r){var t="",s=!0===a.schema.$async,o=a.util.schemaHasRulesExcept(a.schema,a.RULES.all,"$ref"),i=a.self._getId(a.schema);if(a.opts.strictKeywords){var n=a.util.schemaUnknownRules(a.schema,a.RULES.keywords);if(n){var l="unknown keyword: "+n;if("log"!==a.opts.strictKeywords)throw new Error(l);a.logger.warn(l)}}if(a.isTop&&(t+=" var validate = ",s&&(a.async=!0,t+="async "),t+="function(data, dataPath, parentData, parentDataProperty, rootData) { 'use strict'; ",i&&(a.opts.sourceCode||a.opts.processCode)&&(t+=" /*# sourceURL="+i+" */ ")),"boolean"==typeof a.schema||!o&&!a.schema.$ref){var u=a.level,c=a.dataLevel,h=a.schema[e="false schema"],d=a.schemaPath+a.util.getProperty(e),f=a.errSchemaPath+"/"+e,p=!a.opts.allErrors,m="data"+(c||""),v="valid"+u;if(!1===a.schema){a.isTop?p=!0:t+=" var "+v+" = false; ",(G=G||[]).push(t),t="",!1!==a.createErrors?(t+=" { keyword: 'false schema' , dataPath: (dataPath || '') + "+a.errorPath+" , schemaPath: "+a.util.toQuotedString(f)+" , params: {} ",!1!==a.opts.messages&&(t+=" , message: 'boolean schema is false' "),a.opts.verbose&&(t+=" , schema: false , parentSchema: validate.schema"+a.schemaPath+" , data: "+m+" "),t+=" } "):t+=" {} ";var 
y=t;t=G.pop(),t+=!a.compositeRule&&p?a.async?" throw new ValidationError(["+y+"]); ":" validate.errors = ["+y+"]; return false; ":" var err = "+y+"; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; "}else t+=a.isTop?s?" return data; ":" validate.errors = null; return true; ":" var "+v+" = true; ";return a.isTop&&(t+=" }; return validate; "),t}if(a.isTop){var g=a.isTop;u=a.level=0,c=a.dataLevel=0,m="data";if(a.rootId=a.resolve.fullPath(a.self._getId(a.root.schema)),a.baseId=a.baseId||a.rootId,delete a.isTop,a.dataPathArr=[void 0],void 0!==a.schema.default&&a.opts.useDefaults&&a.opts.strictDefaults){var P="default is ignored in the schema root";if("log"!==a.opts.strictDefaults)throw new Error(P);a.logger.warn(P)}t+=" var vErrors = null; ",t+=" var errors = 0; ",t+=" if (rootData === undefined) rootData = data; "}else{u=a.level,m="data"+((c=a.dataLevel)||"");if(i&&(a.baseId=a.resolve.url(a.baseId,i)),s&&!a.async)throw new Error("async schema in sync schema");t+=" var errs_"+u+" = errors;"}v="valid"+u,p=!a.opts.allErrors;var E="",w="",S=a.schema.type,b=Array.isArray(S);if(S&&a.opts.nullable&&!0===a.schema.nullable&&(b?-1==S.indexOf("null")&&(S=S.concat("null")):"null"!=S&&(S=[S,"null"],b=!0)),b&&1==S.length&&(S=S[0],b=!1),a.schema.$ref&&o){if("fail"==a.opts.extendRefs)throw new Error('$ref: validation keywords used in schema at path "'+a.errSchemaPath+'" (see option extendRefs)');!0!==a.opts.extendRefs&&(o=!1,a.logger.warn('$ref: keywords ignored in schema at path "'+a.errSchemaPath+'"'))}if(a.schema.$comment&&a.opts.$comment&&(t+=" "+a.RULES.all.$comment.code(a,"$comment")),S){if(a.opts.coerceTypes)var _=a.util.coerceToTypes(a.opts.coerceTypes,S);var F=a.RULES.types[S];if(_||b||!0===F||F&&!Y(F)){d=a.schemaPath+".type",f=a.errSchemaPath+"/type",d=a.schemaPath+".type",f=a.errSchemaPath+"/type";if(t+=" if ("+a.util[b?"checkDataTypes":"checkDataType"](S,m,!0)+") { ",_){var x="dataType"+u,R="coerced"+u;t+=" var "+x+" = typeof "+m+"; 
","array"==a.opts.coerceTypes&&(t+=" if ("+x+" == 'object' && Array.isArray("+m+")) "+x+" = 'array'; "),t+=" var "+R+" = undefined; ";var $="",D=_;if(D)for(var j,O=-1,I=D.length-1;O= 0x80 (not a basic code point)","invalid-input":"Invalid input"},L=Math.floor,z=String.fromCharCode;function T(e){throw new RangeError(i[e])}function n(e,r){var t=e.split("@"),a="";return 1>1,e+=L(e/r);455L((A-s)/h))&&T("overflow"),s+=f*h;var p=d<=i?1:i+26<=d?26:d-i;if(fL(A/m)&&T("overflow"),h*=m}var v=t.length+1;i=Q(s-c,v,0==c),L(s/v)>A-o&&T("overflow"),o+=L(s/v),s%=v,t.splice(s++,0,o)}return String.fromCodePoint.apply(String,t)},u=function(e){var r=[],t=(e=q(e)).length,a=128,s=0,o=72,i=!0,n=!1,l=void 0;try{for(var u,c=e[Symbol.iterator]();!(i=(u=c.next()).done);i=!0){var h=u.value;h<128&&r.push(z(h))}}catch(e){n=!0,l=e}finally{try{!i&&c.return&&c.return()}finally{if(n)throw l}}var d=r.length,f=d;for(d&&r.push("-");fL((A-s)/w)&&T("overflow"),s+=(p-a)*w,a=p;var S=!0,b=!1,_=void 0;try{for(var F,x=e[Symbol.iterator]();!(S=(F=x.next()).done);S=!0){var R=F.value;if(RA&&T("overflow"),R==a){for(var $=s,D=36;;D+=36){var j=D<=o?1:o+26<=D?26:D-o;if($>6|192).toString(16).toUpperCase()+"%"+(63&r|128).toString(16).toUpperCase():"%"+(r>>12|224).toString(16).toUpperCase()+"%"+(r>>6&63|128).toString(16).toUpperCase()+"%"+(63&r|128).toString(16).toUpperCase()}function f(e){for(var r="",t=0,a=e.length;tA-Z\\x5E-\\x7E]",'[\\"\\\\]'),Z=new RegExp(M,"g"),G=new RegExp(B,"g"),Y=new RegExp(C("[^]","[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]","[\\.]",'[\\"]',J),"g"),W=new RegExp(C("[^]",M,"[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]"),"g"),X=W;function ee(e){var r=f(e);return r.match(Z)?r:e}var re={scheme:"mailto",parse:function(e,r){var t=e,a=t.to=t.path?t.path.split(","):[];if(t.path=void 0,t.query){for(var s=!1,o={},i=t.query.split("&"),n=0,l=i.length;n); + + message: string; + errors: Array; + ajv: true; + validation: true; + } + + class MissingRefError extends Error { + constructor(baseId: string, ref: 
string, message?: string); + static message: (baseId: string, ref: string) => string; + + message: string; + missingRef: string; + missingSchema: string; + } +} + +declare namespace ajv { + type ValidationError = AjvErrors.ValidationError; + + type MissingRefError = AjvErrors.MissingRefError; + + interface Ajv { + /** + * Validate data using schema + * Schema will be compiled and cached (using serialized JSON as key, [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) is used to serialize by default). + * @param {string|object|Boolean} schemaKeyRef key, ref or schema object + * @param {Any} data to be validated + * @return {Boolean} validation result. Errors from the last validation will be available in `ajv.errors` (and also in compiled schema: `schema.errors`). + */ + validate(schemaKeyRef: object | string | boolean, data: any): boolean | PromiseLike; + /** + * Create validating function for passed schema. + * @param {object|Boolean} schema schema object + * @return {Function} validating function + */ + compile(schema: object | boolean): ValidateFunction; + /** + * Creates validating function for passed schema with asynchronous loading of missing schemas. + * `loadSchema` option should be a function that accepts schema uri and node-style callback. + * @this Ajv + * @param {object|Boolean} schema schema object + * @param {Boolean} meta optional true to compile meta-schema; this parameter can be skipped + * @param {Function} callback optional node-style callback, it is always called with 2 parameters: error (or null) and validating function. + * @return {PromiseLike} validating function + */ + compileAsync(schema: object | boolean, meta?: Boolean, callback?: (err: Error, validate: ValidateFunction) => any): PromiseLike; + /** + * Adds schema to the instance. + * @param {object|Array} schema schema or array of schemas. If array is passed, `key` and other parameters will be ignored. + * @param {string} key Optional schema key. 
Can be passed to `validate` method instead of schema object or id/ref. One schema per instance can have empty `id` and `key`. + * @return {Ajv} this for method chaining + */ + addSchema(schema: Array | object, key?: string): Ajv; + /** + * Add schema that will be used to validate other schemas + * options in META_IGNORE_OPTIONS are alway set to false + * @param {object} schema schema object + * @param {string} key optional schema key + * @return {Ajv} this for method chaining + */ + addMetaSchema(schema: object, key?: string): Ajv; + /** + * Validate schema + * @param {object|Boolean} schema schema to validate + * @return {Boolean} true if schema is valid + */ + validateSchema(schema: object | boolean): boolean; + /** + * Get compiled schema from the instance by `key` or `ref`. + * @param {string} keyRef `key` that was passed to `addSchema` or full schema reference (`schema.id` or resolved id). + * @return {Function} schema validating function (with property `schema`). + */ + getSchema(keyRef: string): ValidateFunction; + /** + * Remove cached schema(s). + * If no parameter is passed all schemas but meta-schemas are removed. + * If RegExp is passed all schemas with key/id matching pattern but meta-schemas are removed. + * Even if schema is referenced by other schemas it still can be removed as other schemas have local references. 
+ * @param {string|object|RegExp|Boolean} schemaKeyRef key, ref, pattern to match key/ref or schema object + * @return {Ajv} this for method chaining + */ + removeSchema(schemaKeyRef?: object | string | RegExp | boolean): Ajv; + /** + * Add custom format + * @param {string} name format name + * @param {string|RegExp|Function} format string is converted to RegExp; function should return boolean (true when valid) + * @return {Ajv} this for method chaining + */ + addFormat(name: string, format: FormatValidator | FormatDefinition): Ajv; + /** + * Define custom keyword + * @this Ajv + * @param {string} keyword custom keyword, should be a valid identifier, should be different from all standard, custom and macro keywords. + * @param {object} definition keyword definition object with properties `type` (type(s) which the keyword applies to), `validate` or `compile`. + * @return {Ajv} this for method chaining + */ + addKeyword(keyword: string, definition: KeywordDefinition): Ajv; + /** + * Get keyword definition + * @this Ajv + * @param {string} keyword pre-defined or custom keyword. + * @return {object|Boolean} custom keyword definition, `true` if it is a predefined keyword, `false` otherwise. + */ + getKeyword(keyword: string): object | boolean; + /** + * Remove keyword + * @this Ajv + * @param {string} keyword pre-defined or custom keyword. + * @return {Ajv} this for method chaining + */ + removeKeyword(keyword: string): Ajv; + /** + * Validate keyword + * @this Ajv + * @param {object} definition keyword definition object + * @param {boolean} throwError true to throw exception if definition is invalid + * @return {boolean} validation result + */ + validateKeyword(definition: KeywordDefinition, throwError: boolean): boolean; + /** + * Convert array of error message objects to string + * @param {Array} errors optional array of validation errors, if not passed errors from the instance are used. 
+ * @param {object} options optional options with properties `separator` and `dataVar`. + * @return {string} human readable string with all errors descriptions + */ + errorsText(errors?: Array | null, options?: ErrorsTextOptions): string; + errors?: Array | null; + } + + interface CustomLogger { + log(...args: any[]): any; + warn(...args: any[]): any; + error(...args: any[]): any; + } + + interface ValidateFunction { + ( + data: any, + dataPath?: string, + parentData?: object | Array, + parentDataProperty?: string | number, + rootData?: object | Array + ): boolean | PromiseLike; + schema?: object | boolean; + errors?: null | Array; + refs?: object; + refVal?: Array; + root?: ValidateFunction | object; + $async?: true; + source?: object; + } + + interface Options { + $data?: boolean; + allErrors?: boolean; + verbose?: boolean; + jsonPointers?: boolean; + uniqueItems?: boolean; + unicode?: boolean; + format?: string; + formats?: object; + unknownFormats?: true | string[] | 'ignore'; + schemas?: Array | object; + schemaId?: '$id' | 'id' | 'auto'; + missingRefs?: true | 'ignore' | 'fail'; + extendRefs?: true | 'ignore' | 'fail'; + loadSchema?: (uri: string, cb?: (err: Error, schema: object) => void) => PromiseLike; + removeAdditional?: boolean | 'all' | 'failing'; + useDefaults?: boolean | 'shared'; + coerceTypes?: boolean | 'array'; + strictDefaults?: boolean | 'log'; + async?: boolean | string; + transpile?: string | ((code: string) => string); + meta?: boolean | object; + validateSchema?: boolean | 'log'; + addUsedSchema?: boolean; + inlineRefs?: boolean | number; + passContext?: boolean; + loopRequired?: number; + ownProperties?: boolean; + multipleOfPrecision?: boolean | number; + errorDataPath?: string, + messages?: boolean; + sourceCode?: boolean; + processCode?: (code: string) => string; + cache?: object; + logger?: CustomLogger | false; + nullable?: boolean; + serialize?: ((schema: object | boolean) => any) | false; + } + + type FormatValidator = string | 
RegExp | ((data: string) => boolean | PromiseLike); + type NumberFormatValidator = ((data: number) => boolean | PromiseLike); + + interface NumberFormatDefinition { + type: "number", + validate: NumberFormatValidator; + compare?: (data1: number, data2: number) => number; + async?: boolean; + } + + interface StringFormatDefinition { + type?: "string", + validate: FormatValidator; + compare?: (data1: string, data2: string) => number; + async?: boolean; + } + + type FormatDefinition = NumberFormatDefinition | StringFormatDefinition; + + interface KeywordDefinition { + type?: string | Array; + async?: boolean; + $data?: boolean; + errors?: boolean | string; + metaSchema?: object; + // schema: false makes validate not to expect schema (ValidateFunction) + schema?: boolean; + statements?: boolean; + dependencies?: Array; + modifying?: boolean; + valid?: boolean; + // one and only one of the following properties should be present + validate?: SchemaValidateFunction | ValidateFunction; + compile?: (schema: any, parentSchema: object, it: CompilationContext) => ValidateFunction; + macro?: (schema: any, parentSchema: object, it: CompilationContext) => object | boolean; + inline?: (it: CompilationContext, keyword: string, schema: any, parentSchema: object) => string; + } + + interface CompilationContext { + level: number; + dataLevel: number; + schema: any; + schemaPath: string; + baseId: string; + async: boolean; + opts: Options; + formats: { + [index: string]: FormatDefinition | undefined; + }; + compositeRule: boolean; + validate: (schema: object) => boolean; + util: { + copy(obj: any, target?: any): any; + toHash(source: string[]): { [index: string]: true | undefined }; + equal(obj: any, target: any): boolean; + getProperty(str: string): string; + schemaHasRules(schema: object, rules: any): string; + escapeQuotes(str: string): string; + toQuotedString(str: string): string; + getData(jsonPointer: string, dataLevel: number, paths: string[]): string; + escapeJsonPointer(str: 
string): string; + unescapeJsonPointer(str: string): string; + escapeFragment(str: string): string; + unescapeFragment(str: string): string; + }; + self: Ajv; + } + + interface SchemaValidateFunction { + ( + schema: any, + data: any, + parentSchema?: object, + dataPath?: string, + parentData?: object | Array, + parentDataProperty?: string | number, + rootData?: object | Array + ): boolean | PromiseLike; + errors?: Array; + } + + interface ErrorsTextOptions { + separator?: string; + dataVar?: string; + } + + interface ErrorObject { + keyword: string; + dataPath: string; + schemaPath: string; + params: ErrorParameters; + // Added to validation errors of propertyNames keyword schema + propertyName?: string; + // Excluded if messages set to false. + message?: string; + // These are added with the `verbose` option. + schema?: any; + parentSchema?: object; + data?: any; + } + + type ErrorParameters = RefParams | LimitParams | AdditionalPropertiesParams | + DependenciesParams | FormatParams | ComparisonParams | + MultipleOfParams | PatternParams | RequiredParams | + TypeParams | UniqueItemsParams | CustomParams | + PatternRequiredParams | PropertyNamesParams | + IfParams | SwitchParams | NoParams | EnumParams; + + interface RefParams { + ref: string; + } + + interface LimitParams { + limit: number; + } + + interface AdditionalPropertiesParams { + additionalProperty: string; + } + + interface DependenciesParams { + property: string; + missingProperty: string; + depsCount: number; + deps: string; + } + + interface FormatParams { + format: string + } + + interface ComparisonParams { + comparison: string; + limit: number | string; + exclusive: boolean; + } + + interface MultipleOfParams { + multipleOf: number; + } + + interface PatternParams { + pattern: string; + } + + interface RequiredParams { + missingProperty: string; + } + + interface TypeParams { + type: string; + } + + interface UniqueItemsParams { + i: number; + j: number; + } + + interface CustomParams { + keyword: 
string; + } + + interface PatternRequiredParams { + missingPattern: string; + } + + interface PropertyNamesParams { + propertyName: string; + } + + interface IfParams { + failingKeyword: string; + } + + interface SwitchParams { + caseIndex: number; + } + + interface NoParams { } + + interface EnumParams { + allowedValues: Array; + } +} + +export = ajv; diff --git a/src/node_modules/ajv/lib/ajv.js b/src/node_modules/ajv/lib/ajv.js new file mode 100644 index 0000000..611b938 --- /dev/null +++ b/src/node_modules/ajv/lib/ajv.js @@ -0,0 +1,497 @@ +'use strict'; + +var compileSchema = require('./compile') + , resolve = require('./compile/resolve') + , Cache = require('./cache') + , SchemaObject = require('./compile/schema_obj') + , stableStringify = require('fast-json-stable-stringify') + , formats = require('./compile/formats') + , rules = require('./compile/rules') + , $dataMetaSchema = require('./data') + , util = require('./compile/util'); + +module.exports = Ajv; + +Ajv.prototype.validate = validate; +Ajv.prototype.compile = compile; +Ajv.prototype.addSchema = addSchema; +Ajv.prototype.addMetaSchema = addMetaSchema; +Ajv.prototype.validateSchema = validateSchema; +Ajv.prototype.getSchema = getSchema; +Ajv.prototype.removeSchema = removeSchema; +Ajv.prototype.addFormat = addFormat; +Ajv.prototype.errorsText = errorsText; + +Ajv.prototype._addSchema = _addSchema; +Ajv.prototype._compile = _compile; + +Ajv.prototype.compileAsync = require('./compile/async'); +var customKeyword = require('./keyword'); +Ajv.prototype.addKeyword = customKeyword.add; +Ajv.prototype.getKeyword = customKeyword.get; +Ajv.prototype.removeKeyword = customKeyword.remove; +Ajv.prototype.validateKeyword = customKeyword.validate; + +var errorClasses = require('./compile/error_classes'); +Ajv.ValidationError = errorClasses.Validation; +Ajv.MissingRefError = errorClasses.MissingRef; +Ajv.$dataMetaSchema = $dataMetaSchema; + +var META_SCHEMA_ID = 'http://json-schema.org/draft-07/schema'; + +var 
META_IGNORE_OPTIONS = [ 'removeAdditional', 'useDefaults', 'coerceTypes', 'strictDefaults' ]; +var META_SUPPORT_DATA = ['/properties']; + +/** + * Creates validator instance. + * Usage: `Ajv(opts)` + * @param {Object} opts optional options + * @return {Object} ajv instance + */ +function Ajv(opts) { + if (!(this instanceof Ajv)) return new Ajv(opts); + opts = this._opts = util.copy(opts) || {}; + setLogger(this); + this._schemas = {}; + this._refs = {}; + this._fragments = {}; + this._formats = formats(opts.format); + + this._cache = opts.cache || new Cache; + this._loadingSchemas = {}; + this._compilations = []; + this.RULES = rules(); + this._getId = chooseGetId(opts); + + opts.loopRequired = opts.loopRequired || Infinity; + if (opts.errorDataPath == 'property') opts._errorDataPathProperty = true; + if (opts.serialize === undefined) opts.serialize = stableStringify; + this._metaOpts = getMetaSchemaOptions(this); + + if (opts.formats) addInitialFormats(this); + addDefaultMetaSchema(this); + if (typeof opts.meta == 'object') this.addMetaSchema(opts.meta); + if (opts.nullable) this.addKeyword('nullable', {metaSchema: {type: 'boolean'}}); + addInitialSchemas(this); +} + + + +/** + * Validate data using schema + * Schema will be compiled and cached (using serialized JSON as key. [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) is used to serialize. + * @this Ajv + * @param {String|Object} schemaKeyRef key, ref or schema object + * @param {Any} data to be validated + * @return {Boolean} validation result. Errors from the last validation will be available in `ajv.errors` (and also in compiled schema: `schema.errors`). 
+ */ +function validate(schemaKeyRef, data) { + var v; + if (typeof schemaKeyRef == 'string') { + v = this.getSchema(schemaKeyRef); + if (!v) throw new Error('no schema with key or ref "' + schemaKeyRef + '"'); + } else { + var schemaObj = this._addSchema(schemaKeyRef); + v = schemaObj.validate || this._compile(schemaObj); + } + + var valid = v(data); + if (v.$async !== true) this.errors = v.errors; + return valid; +} + + +/** + * Create validating function for passed schema. + * @this Ajv + * @param {Object} schema schema object + * @param {Boolean} _meta true if schema is a meta-schema. Used internally to compile meta schemas of custom keywords. + * @return {Function} validating function + */ +function compile(schema, _meta) { + var schemaObj = this._addSchema(schema, undefined, _meta); + return schemaObj.validate || this._compile(schemaObj); +} + + +/** + * Adds schema to the instance. + * @this Ajv + * @param {Object|Array} schema schema or array of schemas. If array is passed, `key` and other parameters will be ignored. + * @param {String} key Optional schema key. Can be passed to `validate` method instead of schema object or id/ref. One schema per instance can have empty `id` and `key`. + * @param {Boolean} _skipValidation true to skip schema validation. Used internally, option validateSchema should be used instead. + * @param {Boolean} _meta true if schema is a meta-schema. Used internally, addMetaSchema should be used instead. + * @return {Ajv} this for method chaining + */ +function addSchema(schema, key, _skipValidation, _meta) { + if (Array.isArray(schema)){ + for (var i=0; i} errors optional array of validation errors, if not passed errors from the instance are used. + * @param {Object} options optional options with properties `separator` and `dataVar`. 
+ * @return {String} human readable string with all errors descriptions + */ +function errorsText(errors, options) { + errors = errors || this.errors; + if (!errors) return 'No errors'; + options = options || {}; + var separator = options.separator === undefined ? ', ' : options.separator; + var dataVar = options.dataVar === undefined ? 'data' : options.dataVar; + + var text = ''; + for (var i=0; i%\\^`{|}]|%[0-9a-f]{2})|\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?)*\})*$/i; +// For the source: https://gist.github.com/dperini/729294 +// For test cases: https://mathiasbynens.be/demo/url-regex +// @todo Delete current URL in favour of the commented out URL rule when this issue is fixed https://github.com/eslint/eslint/issues/7983. +// var URL = /^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)(?:\.(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)*(?:\.(?:[a-z\u{00a1}-\u{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$/iu; +var URL = 
/^(?:(?:http[s\u017F]?|ftp):\/\/)(?:(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+(?::(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?@)?(?:(?!10(?:\.[0-9]{1,3}){3})(?!127(?:\.[0-9]{1,3}){3})(?!169\.254(?:\.[0-9]{1,3}){2})(?!192\.168(?:\.[0-9]{1,3}){2})(?!172\.(?:1[6-9]|2[0-9]|3[01])(?:\.[0-9]{1,3}){2})(?:[1-9][0-9]?|1[0-9][0-9]|2[01][0-9]|22[0-3])(?:\.(?:1?[0-9]{1,2}|2[0-4][0-9]|25[0-5])){2}(?:\.(?:[1-9][0-9]?|1[0-9][0-9]|2[0-4][0-9]|25[0-4]))|(?:(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)(?:\.(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)*(?:\.(?:(?:[KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]){2,})))(?::[0-9]{2,5})?(?:\/(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?$/i; +var UUID = /^(?:urn:uuid:)?[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$/i; +var JSON_POINTER = /^(?:\/(?:[^~/]|~0|~1)*)*$/; +var JSON_POINTER_URI_FRAGMENT = /^#(?:\/(?:[a-z0-9_\-.!$&'()*+,;:=@]|%[0-9a-f]{2}|~0|~1)*)*$/i; +var RELATIVE_JSON_POINTER = 
/^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)$/; + + +module.exports = formats; + +function formats(mode) { + mode = mode == 'full' ? 'full' : 'fast'; + return util.copy(formats[mode]); +} + + +formats.fast = { + // date: http://tools.ietf.org/html/rfc3339#section-5.6 + date: /^\d\d\d\d-[0-1]\d-[0-3]\d$/, + // date-time: http://tools.ietf.org/html/rfc3339#section-5.6 + time: /^(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d:\d\d)?$/i, + 'date-time': /^\d\d\d\d-[0-1]\d-[0-3]\d[t\s](?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d:\d\d)$/i, + // uri: https://github.com/mafintosh/is-my-json-valid/blob/master/formats.js + uri: /^(?:[a-z][a-z0-9+-.]*:)(?:\/?\/)?[^\s]*$/i, + 'uri-reference': /^(?:(?:[a-z][a-z0-9+-.]*:)?\/?\/)?(?:[^\\\s#][^\s#]*)?(?:#[^\\\s]*)?$/i, + 'uri-template': URITEMPLATE, + url: URL, + // email (sources from jsen validator): + // http://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address#answer-8829363 + // http://www.w3.org/TR/html5/forms.html#valid-e-mail-address (search for 'willful violation') + email: /^[a-z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?)*$/i, + hostname: HOSTNAME, + // optimized https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9780596802837/ch07s16.html + ipv4: /^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/, + // optimized http://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses + ipv6: 
/^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i, + regex: regex, + // uuid: http://tools.ietf.org/html/rfc4122 + uuid: UUID, + // JSON-pointer: https://tools.ietf.org/html/rfc6901 + // uri fragment: https://tools.ietf.org/html/rfc3986#appendix-A + 'json-pointer': JSON_POINTER, + 'json-pointer-uri-fragment': JSON_POINTER_URI_FRAGMENT, + // relative JSON-pointer: http://tools.ietf.org/html/draft-luff-relative-json-pointer-00 + 'relative-json-pointer': RELATIVE_JSON_POINTER +}; + + +formats.full = { + date: date, + time: time, + 'date-time': date_time, + uri: uri, + 'uri-reference': URIREF, + 'uri-template': URITEMPLATE, + url: URL, + email: /^[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$/i, + hostname: hostname, + ipv4: 
/^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/, + ipv6: /^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i, + regex: regex, + uuid: UUID, + 'json-pointer': JSON_POINTER, + 'json-pointer-uri-fragment': JSON_POINTER_URI_FRAGMENT, + 'relative-json-pointer': RELATIVE_JSON_POINTER +}; + + +function isLeapYear(year) { + // https://tools.ietf.org/html/rfc3339#appendix-C + return year % 4 === 0 && (year % 100 !== 0 || year % 400 === 0); +} + + +function date(str) { + // full-date from http://tools.ietf.org/html/rfc3339#section-5.6 + var matches = str.match(DATE); + if (!matches) return false; + + var year = +matches[1]; + var month = +matches[2]; + var day = +matches[3]; + + return month >= 1 && month <= 12 && day >= 1 && + day <= (month == 2 && isLeapYear(year) ? 
29 : DAYS[month]); +} + + +function time(str, full) { + var matches = str.match(TIME); + if (!matches) return false; + + var hour = matches[1]; + var minute = matches[2]; + var second = matches[3]; + var timeZone = matches[5]; + return ((hour <= 23 && minute <= 59 && second <= 59) || + (hour == 23 && minute == 59 && second == 60)) && + (!full || timeZone); +} + + +var DATE_TIME_SEPARATOR = /t|\s/i; +function date_time(str) { + // http://tools.ietf.org/html/rfc3339#section-5.6 + var dateTime = str.split(DATE_TIME_SEPARATOR); + return dateTime.length == 2 && date(dateTime[0]) && time(dateTime[1], true); +} + + +function hostname(str) { + // https://tools.ietf.org/html/rfc1034#section-3.5 + // https://tools.ietf.org/html/rfc1123#section-2 + return str.length <= 255 && HOSTNAME.test(str); +} + + +var NOT_URI_FRAGMENT = /\/|:/; +function uri(str) { + // http://jmrware.com/articles/2009/uri_regexp/URI_regex.html + optional protocol + required "." + return NOT_URI_FRAGMENT.test(str) && URI.test(str); +} + + +var Z_ANCHOR = /[^\\]\\Z/; +function regex(str) { + if (Z_ANCHOR.test(str)) return false; + try { + new RegExp(str); + return true; + } catch(e) { + return false; + } +} diff --git a/src/node_modules/ajv/lib/compile/index.js b/src/node_modules/ajv/lib/compile/index.js new file mode 100644 index 0000000..f4d3f0d --- /dev/null +++ b/src/node_modules/ajv/lib/compile/index.js @@ -0,0 +1,387 @@ +'use strict'; + +var resolve = require('./resolve') + , util = require('./util') + , errorClasses = require('./error_classes') + , stableStringify = require('fast-json-stable-stringify'); + +var validateGenerator = require('../dotjs/validate'); + +/** + * Functions below are used inside compiled validations function + */ + +var ucs2length = util.ucs2length; +var equal = require('fast-deep-equal'); + +// this error is thrown by async schemas to return validation errors via exception +var ValidationError = errorClasses.Validation; + +module.exports = compile; + + +/** + * Compiles 
schema to validation function + * @this Ajv + * @param {Object} schema schema object + * @param {Object} root object with information about the root schema for this schema + * @param {Object} localRefs the hash of local references inside the schema (created by resolve.id), used for inline resolution + * @param {String} baseId base ID for IDs in the schema + * @return {Function} validation function + */ +function compile(schema, root, localRefs, baseId) { + /* jshint validthis: true, evil: true */ + /* eslint no-shadow: 0 */ + var self = this + , opts = this._opts + , refVal = [ undefined ] + , refs = {} + , patterns = [] + , patternsHash = {} + , defaults = [] + , defaultsHash = {} + , customRules = []; + + root = root || { schema: schema, refVal: refVal, refs: refs }; + + var c = checkCompiling.call(this, schema, root, baseId); + var compilation = this._compilations[c.index]; + if (c.compiling) return (compilation.callValidate = callValidate); + + var formats = this._formats; + var RULES = this.RULES; + + try { + var v = localCompile(schema, root, localRefs, baseId); + compilation.validate = v; + var cv = compilation.callValidate; + if (cv) { + cv.schema = v.schema; + cv.errors = null; + cv.refs = v.refs; + cv.refVal = v.refVal; + cv.root = v.root; + cv.$async = v.$async; + if (opts.sourceCode) cv.source = v.source; + } + return v; + } finally { + endCompiling.call(this, schema, root, baseId); + } + + /* @this {*} - custom context, see passContext option */ + function callValidate() { + /* jshint validthis: true */ + var validate = compilation.validate; + var result = validate.apply(this, arguments); + callValidate.errors = validate.errors; + return result; + } + + function localCompile(_schema, _root, localRefs, baseId) { + var isRoot = !_root || (_root && _root.schema == _schema); + if (_root.schema != root.schema) + return compile.call(self, _schema, _root, localRefs, baseId); + + var $async = _schema.$async === true; + + var sourceCode = validateGenerator({ + 
isTop: true, + schema: _schema, + isRoot: isRoot, + baseId: baseId, + root: _root, + schemaPath: '', + errSchemaPath: '#', + errorPath: '""', + MissingRefError: errorClasses.MissingRef, + RULES: RULES, + validate: validateGenerator, + util: util, + resolve: resolve, + resolveRef: resolveRef, + usePattern: usePattern, + useDefault: useDefault, + useCustomRule: useCustomRule, + opts: opts, + formats: formats, + logger: self.logger, + self: self + }); + + sourceCode = vars(refVal, refValCode) + vars(patterns, patternCode) + + vars(defaults, defaultCode) + vars(customRules, customRuleCode) + + sourceCode; + + if (opts.processCode) sourceCode = opts.processCode(sourceCode); + // console.log('\n\n\n *** \n', JSON.stringify(sourceCode)); + var validate; + try { + var makeValidate = new Function( + 'self', + 'RULES', + 'formats', + 'root', + 'refVal', + 'defaults', + 'customRules', + 'equal', + 'ucs2length', + 'ValidationError', + sourceCode + ); + + validate = makeValidate( + self, + RULES, + formats, + root, + refVal, + defaults, + customRules, + equal, + ucs2length, + ValidationError + ); + + refVal[0] = validate; + } catch(e) { + self.logger.error('Error compiling schema, function code:', sourceCode); + throw e; + } + + validate.schema = _schema; + validate.errors = null; + validate.refs = refs; + validate.refVal = refVal; + validate.root = isRoot ? 
validate : _root; + if ($async) validate.$async = true; + if (opts.sourceCode === true) { + validate.source = { + code: sourceCode, + patterns: patterns, + defaults: defaults + }; + } + + return validate; + } + + function resolveRef(baseId, ref, isRoot) { + ref = resolve.url(baseId, ref); + var refIndex = refs[ref]; + var _refVal, refCode; + if (refIndex !== undefined) { + _refVal = refVal[refIndex]; + refCode = 'refVal[' + refIndex + ']'; + return resolvedRef(_refVal, refCode); + } + if (!isRoot && root.refs) { + var rootRefId = root.refs[ref]; + if (rootRefId !== undefined) { + _refVal = root.refVal[rootRefId]; + refCode = addLocalRef(ref, _refVal); + return resolvedRef(_refVal, refCode); + } + } + + refCode = addLocalRef(ref); + var v = resolve.call(self, localCompile, root, ref); + if (v === undefined) { + var localSchema = localRefs && localRefs[ref]; + if (localSchema) { + v = resolve.inlineRef(localSchema, opts.inlineRefs) + ? localSchema + : compile.call(self, localSchema, root, localRefs, baseId); + } + } + + if (v === undefined) { + removeLocalRef(ref); + } else { + replaceLocalRef(ref, v); + return resolvedRef(v, refCode); + } + } + + function addLocalRef(ref, v) { + var refId = refVal.length; + refVal[refId] = v; + refs[ref] = refId; + return 'refVal' + refId; + } + + function removeLocalRef(ref) { + delete refs[ref]; + } + + function replaceLocalRef(ref, v) { + var refId = refs[ref]; + refVal[refId] = v; + } + + function resolvedRef(refVal, code) { + return typeof refVal == 'object' || typeof refVal == 'boolean' + ? 
{ code: code, schema: refVal, inline: true } + : { code: code, $async: refVal && !!refVal.$async }; + } + + function usePattern(regexStr) { + var index = patternsHash[regexStr]; + if (index === undefined) { + index = patternsHash[regexStr] = patterns.length; + patterns[index] = regexStr; + } + return 'pattern' + index; + } + + function useDefault(value) { + switch (typeof value) { + case 'boolean': + case 'number': + return '' + value; + case 'string': + return util.toQuotedString(value); + case 'object': + if (value === null) return 'null'; + var valueStr = stableStringify(value); + var index = defaultsHash[valueStr]; + if (index === undefined) { + index = defaultsHash[valueStr] = defaults.length; + defaults[index] = value; + } + return 'default' + index; + } + } + + function useCustomRule(rule, schema, parentSchema, it) { + if (self._opts.validateSchema !== false) { + var deps = rule.definition.dependencies; + if (deps && !deps.every(function(keyword) { + return Object.prototype.hasOwnProperty.call(parentSchema, keyword); + })) + throw new Error('parent schema must have all required keywords: ' + deps.join(',')); + + var validateSchema = rule.definition.validateSchema; + if (validateSchema) { + var valid = validateSchema(schema); + if (!valid) { + var message = 'keyword schema is invalid: ' + self.errorsText(validateSchema.errors); + if (self._opts.validateSchema == 'log') self.logger.error(message); + else throw new Error(message); + } + } + } + + var compile = rule.definition.compile + , inline = rule.definition.inline + , macro = rule.definition.macro; + + var validate; + if (compile) { + validate = compile.call(self, schema, parentSchema, it); + } else if (macro) { + validate = macro.call(self, schema, parentSchema, it); + if (opts.validateSchema !== false) self.validateSchema(validate, true); + } else if (inline) { + validate = inline.call(self, it, rule.keyword, schema, parentSchema); + } else { + validate = rule.definition.validate; + if (!validate) 
return; + } + + if (validate === undefined) + throw new Error('custom keyword "' + rule.keyword + '"failed to compile'); + + var index = customRules.length; + customRules[index] = validate; + + return { + code: 'customRule' + index, + validate: validate + }; + } +} + + +/** + * Checks if the schema is currently compiled + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + * @return {Object} object with properties "index" (compilation index) and "compiling" (boolean) + */ +function checkCompiling(schema, root, baseId) { + /* jshint validthis: true */ + var index = compIndex.call(this, schema, root, baseId); + if (index >= 0) return { index: index, compiling: true }; + index = this._compilations.length; + this._compilations[index] = { + schema: schema, + root: root, + baseId: baseId + }; + return { index: index, compiling: false }; +} + + +/** + * Removes the schema from the currently compiled list + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + */ +function endCompiling(schema, root, baseId) { + /* jshint validthis: true */ + var i = compIndex.call(this, schema, root, baseId); + if (i >= 0) this._compilations.splice(i, 1); +} + + +/** + * Index of schema compilation in the currently compiled list + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + * @return {Integer} compilation index + */ +function compIndex(schema, root, baseId) { + /* jshint validthis: true */ + for (var i=0; i= 0xD800 && value <= 0xDBFF && pos < len) { + // high surrogate, and there is a next character + value = str.charCodeAt(pos); + if ((value & 0xFC00) == 0xDC00) pos++; // low surrogate + } + } + return length; +}; diff --git a/src/node_modules/ajv/lib/compile/util.js b/src/node_modules/ajv/lib/compile/util.js new file mode 100644 
index 0000000..0efa001 --- /dev/null +++ b/src/node_modules/ajv/lib/compile/util.js @@ -0,0 +1,274 @@ +'use strict'; + + +module.exports = { + copy: copy, + checkDataType: checkDataType, + checkDataTypes: checkDataTypes, + coerceToTypes: coerceToTypes, + toHash: toHash, + getProperty: getProperty, + escapeQuotes: escapeQuotes, + equal: require('fast-deep-equal'), + ucs2length: require('./ucs2length'), + varOccurences: varOccurences, + varReplace: varReplace, + cleanUpCode: cleanUpCode, + finalCleanUpCode: finalCleanUpCode, + schemaHasRules: schemaHasRules, + schemaHasRulesExcept: schemaHasRulesExcept, + schemaUnknownRules: schemaUnknownRules, + toQuotedString: toQuotedString, + getPathExpr: getPathExpr, + getPath: getPath, + getData: getData, + unescapeFragment: unescapeFragment, + unescapeJsonPointer: unescapeJsonPointer, + escapeFragment: escapeFragment, + escapeJsonPointer: escapeJsonPointer +}; + + +function copy(o, to) { + to = to || {}; + for (var key in o) to[key] = o[key]; + return to; +} + + +function checkDataType(dataType, data, negate) { + var EQUAL = negate ? ' !== ' : ' === ' + , AND = negate ? ' || ' : ' && ' + , OK = negate ? '!' : '' + , NOT = negate ? '' : '!'; + switch (dataType) { + case 'null': return data + EQUAL + 'null'; + case 'array': return OK + 'Array.isArray(' + data + ')'; + case 'object': return '(' + OK + data + AND + + 'typeof ' + data + EQUAL + '"object"' + AND + + NOT + 'Array.isArray(' + data + '))'; + case 'integer': return '(typeof ' + data + EQUAL + '"number"' + AND + + NOT + '(' + data + ' % 1)' + + AND + data + EQUAL + data + ')'; + default: return 'typeof ' + data + EQUAL + '"' + dataType + '"'; + } +} + + +function checkDataTypes(dataTypes, data) { + switch (dataTypes.length) { + case 1: return checkDataType(dataTypes[0], data, true); + default: + var code = ''; + var types = toHash(dataTypes); + if (types.array && types.object) { + code = types.null ? '(': '(!' 
+ data + ' || '; + code += 'typeof ' + data + ' !== "object")'; + delete types.null; + delete types.array; + delete types.object; + } + if (types.number) delete types.integer; + for (var t in types) + code += (code ? ' && ' : '' ) + checkDataType(t, data, true); + + return code; + } +} + + +var COERCE_TO_TYPES = toHash([ 'string', 'number', 'integer', 'boolean', 'null' ]); +function coerceToTypes(optionCoerceTypes, dataTypes) { + if (Array.isArray(dataTypes)) { + var types = []; + for (var i=0; i= lvl) throw new Error('Cannot access property/index ' + up + ' levels up, current level is ' + lvl); + return paths[lvl - up]; + } + + if (up > lvl) throw new Error('Cannot access data ' + up + ' levels up, current level is ' + lvl); + data = 'data' + ((lvl - up) || ''); + if (!jsonPointer) return data; + } + + var expr = data; + var segments = jsonPointer.split('/'); + for (var i=0; i' + , $notOp = $isMax ? '>' : '<' + , $errorKeyword = undefined; +}} + +{{? $isDataExcl }} + {{ + var $schemaValueExcl = it.util.getData($schemaExcl.$data, $dataLvl, it.dataPathArr) + , $exclusive = 'exclusive' + $lvl + , $exclType = 'exclType' + $lvl + , $exclIsNumber = 'exclIsNumber' + $lvl + , $opExpr = 'op' + $lvl + , $opStr = '\' + ' + $opExpr + ' + \''; + }} + var schemaExcl{{=$lvl}} = {{=$schemaValueExcl}}; + {{ $schemaValueExcl = 'schemaExcl' + $lvl; }} + + var {{=$exclusive}}; + var {{=$exclType}} = typeof {{=$schemaValueExcl}}; + if ({{=$exclType}} != 'boolean' && {{=$exclType}} != 'undefined' && {{=$exclType}} != 'number') { + {{ var $errorKeyword = $exclusiveKeyword; }} + {{# def.error:'_exclusiveLimit' }} + } else if ({{# def.$dataNotType:'number' }} + {{=$exclType}} == 'number' + ? ( + ({{=$exclusive}} = {{=$schemaValue}} === undefined || {{=$schemaValueExcl}} {{=$op}}= {{=$schemaValue}}) + ? {{=$data}} {{=$notOp}}= {{=$schemaValueExcl}} + : {{=$data}} {{=$notOp}} {{=$schemaValue}} + ) + : ( + ({{=$exclusive}} = {{=$schemaValueExcl}} === true) + ? 
{{=$data}} {{=$notOp}}= {{=$schemaValue}} + : {{=$data}} {{=$notOp}} {{=$schemaValue}} + ) + || {{=$data}} !== {{=$data}}) { + var op{{=$lvl}} = {{=$exclusive}} ? '{{=$op}}' : '{{=$op}}='; + {{ + if ($schema === undefined) { + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaValueExcl; + $isData = $isDataExcl; + } + }} +{{??}} + {{ + var $exclIsNumber = typeof $schemaExcl == 'number' + , $opStr = $op; /*used in error*/ + }} + + {{? $exclIsNumber && $isData }} + {{ var $opExpr = '\'' + $opStr + '\''; /*used in error*/ }} + if ({{# def.$dataNotType:'number' }} + ( {{=$schemaValue}} === undefined + || {{=$schemaExcl}} {{=$op}}= {{=$schemaValue}} + ? {{=$data}} {{=$notOp}}= {{=$schemaExcl}} + : {{=$data}} {{=$notOp}} {{=$schemaValue}} ) + || {{=$data}} !== {{=$data}}) { + {{??}} + {{ + if ($exclIsNumber && $schema === undefined) { + {{# def.setExclusiveLimit }} + $schemaValue = $schemaExcl; + $notOp += '='; + } else { + if ($exclIsNumber) + $schemaValue = Math[$isMax ? 'min' : 'max']($schemaExcl, $schema); + + if ($schemaExcl === ($exclIsNumber ? $schemaValue : true)) { + {{# def.setExclusiveLimit }} + $notOp += '='; + } else { + $exclusive = false; + $opStr += '='; + } + } + + var $opExpr = '\'' + $opStr + '\''; /*used in error*/ + }} + + if ({{# def.$dataNotType:'number' }} + {{=$data}} {{=$notOp}} {{=$schemaValue}} + || {{=$data}} !== {{=$data}}) { + {{?}} +{{?}} + {{ $errorKeyword = $errorKeyword || $keyword; }} + {{# def.error:'_limit' }} + } {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/_limitItems.jst b/src/node_modules/ajv/lib/dot/_limitItems.jst new file mode 100644 index 0000000..a3e078e --- /dev/null +++ b/src/node_modules/ajv/lib/dot/_limitItems.jst @@ -0,0 +1,10 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +{{ var $op = $keyword == 'maxItems' ? 
'>' : '<'; }} +if ({{# def.$dataNotType:'number' }} {{=$data}}.length {{=$op}} {{=$schemaValue}}) { + {{ var $errorKeyword = $keyword; }} + {{# def.error:'_limitItems' }} +} {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/_limitLength.jst b/src/node_modules/ajv/lib/dot/_limitLength.jst new file mode 100644 index 0000000..cfc8dbb --- /dev/null +++ b/src/node_modules/ajv/lib/dot/_limitLength.jst @@ -0,0 +1,10 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +{{ var $op = $keyword == 'maxLength' ? '>' : '<'; }} +if ({{# def.$dataNotType:'number' }} {{# def.strLength }} {{=$op}} {{=$schemaValue}}) { + {{ var $errorKeyword = $keyword; }} + {{# def.error:'_limitLength' }} +} {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/_limitProperties.jst b/src/node_modules/ajv/lib/dot/_limitProperties.jst new file mode 100644 index 0000000..da7ea77 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/_limitProperties.jst @@ -0,0 +1,10 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +{{ var $op = $keyword == 'maxProperties' ? '>' : '<'; }} +if ({{# def.$dataNotType:'number' }} Object.keys({{=$data}}).length {{=$op}} {{=$schemaValue}}) { + {{ var $errorKeyword = $keyword; }} + {{# def.error:'_limitProperties' }} +} {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/allOf.jst b/src/node_modules/ajv/lib/dot/allOf.jst new file mode 100644 index 0000000..4c28363 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/allOf.jst @@ -0,0 +1,34 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + +{{ + var $currentBaseId = $it.baseId + , $allSchemasEmpty = true; +}} + +{{~ $schema:$sch:$i }} + {{? 
{{# def.nonEmptySchema:$sch }} }} + {{ + $allSchemasEmpty = false; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + }} + + {{# def.insertSubschemaCode }} + + {{# def.ifResultValid }} + {{?}} +{{~}} + +{{? $breakOnError }} + {{? $allSchemasEmpty }} + if (true) { + {{??}} + {{= $closingBraces.slice(0,-1) }} + {{?}} +{{?}} + +{{# def.cleanUp }} diff --git a/src/node_modules/ajv/lib/dot/anyOf.jst b/src/node_modules/ajv/lib/dot/anyOf.jst new file mode 100644 index 0000000..086cf2b --- /dev/null +++ b/src/node_modules/ajv/lib/dot/anyOf.jst @@ -0,0 +1,48 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + +{{ + var $noEmptySchema = $schema.every(function($sch) { + return {{# def.nonEmptySchema:$sch }}; + }); +}} +{{? $noEmptySchema }} + {{ var $currentBaseId = $it.baseId; }} + var {{=$errs}} = errors; + var {{=$valid}} = false; + + {{# def.setCompositeRule }} + + {{~ $schema:$sch:$i }} + {{ + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + }} + + {{# def.insertSubschemaCode }} + + {{=$valid}} = {{=$valid}} || {{=$nextValid}}; + + if (!{{=$valid}}) { + {{ $closingBraces += '}'; }} + {{~}} + + {{# def.resetCompositeRule }} + + {{= $closingBraces }} + + if (!{{=$valid}}) { + {{# def.extraError:'anyOf' }} + } else { + {{# def.resetErrors }} + {{? it.opts.allErrors }} } {{?}} + + {{# def.cleanUp }} +{{??}} + {{? $breakOnError }} + if (true) { + {{?}} +{{?}} diff --git a/src/node_modules/ajv/lib/dot/coerce.def b/src/node_modules/ajv/lib/dot/coerce.def new file mode 100644 index 0000000..86e0e18 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/coerce.def @@ -0,0 +1,61 @@ +{{## def.coerceType: + {{ + var $dataType = 'dataType' + $lvl + , $coerced = 'coerced' + $lvl; + }} + var {{=$dataType}} = typeof {{=$data}}; + {{? 
it.opts.coerceTypes == 'array'}} + if ({{=$dataType}} == 'object' && Array.isArray({{=$data}})) {{=$dataType}} = 'array'; + {{?}} + + var {{=$coerced}} = undefined; + + {{ var $bracesCoercion = ''; }} + {{~ $coerceToTypes:$type:$i }} + {{? $i }} + if ({{=$coerced}} === undefined) { + {{ $bracesCoercion += '}'; }} + {{?}} + + {{? it.opts.coerceTypes == 'array' && $type != 'array' }} + if ({{=$dataType}} == 'array' && {{=$data}}.length == 1) { + {{=$coerced}} = {{=$data}} = {{=$data}}[0]; + {{=$dataType}} = typeof {{=$data}}; + /*if ({{=$dataType}} == 'object' && Array.isArray({{=$data}})) {{=$dataType}} = 'array';*/ + } + {{?}} + + {{? $type == 'string' }} + if ({{=$dataType}} == 'number' || {{=$dataType}} == 'boolean') + {{=$coerced}} = '' + {{=$data}}; + else if ({{=$data}} === null) {{=$coerced}} = ''; + {{?? $type == 'number' || $type == 'integer' }} + if ({{=$dataType}} == 'boolean' || {{=$data}} === null + || ({{=$dataType}} == 'string' && {{=$data}} && {{=$data}} == +{{=$data}} + {{? $type == 'integer' }} && !({{=$data}} % 1){{?}})) + {{=$coerced}} = +{{=$data}}; + {{?? $type == 'boolean' }} + if ({{=$data}} === 'false' || {{=$data}} === 0 || {{=$data}} === null) + {{=$coerced}} = false; + else if ({{=$data}} === 'true' || {{=$data}} === 1) + {{=$coerced}} = true; + {{?? $type == 'null' }} + if ({{=$data}} === '' || {{=$data}} === 0 || {{=$data}} === false) + {{=$coerced}} = null; + {{?? it.opts.coerceTypes == 'array' && $type == 'array' }} + if ({{=$dataType}} == 'string' || {{=$dataType}} == 'number' || {{=$dataType}} == 'boolean' || {{=$data}} == null) + {{=$coerced}} = [{{=$data}}]; + {{?}} + {{~}} + + {{= $bracesCoercion }} + + if ({{=$coerced}} === undefined) { + {{# def.error:'type' }} + } else { + {{# def.setParentData }} + {{=$data}} = {{=$coerced}}; + {{? 
!$dataLvl }}if ({{=$parentData}} !== undefined){{?}} + {{=$parentData}}[{{=$parentDataProperty}}] = {{=$coerced}}; + } +#}} diff --git a/src/node_modules/ajv/lib/dot/comment.jst b/src/node_modules/ajv/lib/dot/comment.jst new file mode 100644 index 0000000..f959150 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/comment.jst @@ -0,0 +1,9 @@ +{{# def.definitions }} +{{# def.setupKeyword }} + +{{ var $comment = it.util.toQuotedString($schema); }} +{{? it.opts.$comment === true }} + console.log({{=$comment}}); +{{?? typeof it.opts.$comment == 'function' }} + self._opts.$comment({{=$comment}}, {{=it.util.toQuotedString($errSchemaPath)}}, validate.root.schema); +{{?}} diff --git a/src/node_modules/ajv/lib/dot/const.jst b/src/node_modules/ajv/lib/dot/const.jst new file mode 100644 index 0000000..2aa2298 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/const.jst @@ -0,0 +1,11 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +{{? !$isData }} + var schema{{=$lvl}} = validate.schema{{=$schemaPath}}; +{{?}} +var {{=$valid}} = equal({{=$data}}, schema{{=$lvl}}); +{{# def.checkError:'const' }} +{{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/contains.jst b/src/node_modules/ajv/lib/dot/contains.jst new file mode 100644 index 0000000..925d2c8 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/contains.jst @@ -0,0 +1,57 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + + +{{ + var $idx = 'i' + $lvl + , $dataNxt = $it.dataLevel = it.dataLevel + 1 + , $nextData = 'data' + $dataNxt + , $currentBaseId = it.baseId + , $nonEmptySchema = {{# def.nonEmptySchema:$schema }}; +}} + +var {{=$errs}} = errors; +var {{=$valid}}; + +{{? 
$nonEmptySchema }} + {{# def.setCompositeRule }} + + {{ + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + }} + + var {{=$nextValid}} = false; + + for (var {{=$idx}} = 0; {{=$idx}} < {{=$data}}.length; {{=$idx}}++) { + {{ + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + }} + + {{# def.generateSubschemaCode }} + {{# def.optimizeValidate }} + + if ({{=$nextValid}}) break; + } + + {{# def.resetCompositeRule }} + {{= $closingBraces }} + + if (!{{=$nextValid}}) { +{{??}} + if ({{=$data}}.length == 0) { +{{?}} + + {{# def.error:'contains' }} + } else { + {{? $nonEmptySchema }} + {{# def.resetErrors }} + {{?}} + {{? it.opts.allErrors }} } {{?}} + +{{# def.cleanUp }} diff --git a/src/node_modules/ajv/lib/dot/custom.jst b/src/node_modules/ajv/lib/dot/custom.jst new file mode 100644 index 0000000..d30588f --- /dev/null +++ b/src/node_modules/ajv/lib/dot/custom.jst @@ -0,0 +1,191 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +{{ + var $rule = this + , $definition = 'definition' + $lvl + , $rDef = $rule.definition + , $closingBraces = ''; + var $validate = $rDef.validate; + var $compile, $inline, $macro, $ruleValidate, $validateCode; +}} + +{{? 
$isData && $rDef.$data }} + {{ + $validateCode = 'keywordValidate' + $lvl; + var $validateSchema = $rDef.validateSchema; + }} + var {{=$definition}} = RULES.custom['{{=$keyword}}'].definition; + var {{=$validateCode}} = {{=$definition}}.validate; +{{??}} + {{ + $ruleValidate = it.useCustomRule($rule, $schema, it.schema, it); + if (!$ruleValidate) return; + $schemaValue = 'validate.schema' + $schemaPath; + $validateCode = $ruleValidate.code; + $compile = $rDef.compile; + $inline = $rDef.inline; + $macro = $rDef.macro; + }} +{{?}} + +{{ + var $ruleErrs = $validateCode + '.errors' + , $i = 'i' + $lvl + , $ruleErr = 'ruleErr' + $lvl + , $asyncKeyword = $rDef.async; + + if ($asyncKeyword && !it.async) + throw new Error('async keyword in sync schema'); +}} + + +{{? !($inline || $macro) }}{{=$ruleErrs}} = null;{{?}} +var {{=$errs}} = errors; +var {{=$valid}}; + +{{## def.callRuleValidate: + {{=$validateCode}}.call( + {{? it.opts.passContext }}this{{??}}self{{?}} + {{? $compile || $rDef.schema === false }} + , {{=$data}} + {{??}} + , {{=$schemaValue}} + , {{=$data}} + , validate.schema{{=it.schemaPath}} + {{?}} + , {{# def.dataPath }} + {{# def.passParentData }} + , rootData + ) +#}} + +{{## def.extendErrors:_inline: + for (var {{=$i}}={{=$errs}}; {{=$i}}= 0 }} + {{# def.skipFormat }} + {{??}} + {{ throw new Error('unknown format "' + $schema + '" is used in schema at path "' + it.errSchemaPath + '"'); }} + {{?}} + {{?}} + {{ + var $isObject = typeof $format == 'object' + && !($format instanceof RegExp) + && $format.validate; + var $formatType = $isObject && $format.type || 'string'; + if ($isObject) { + var $async = $format.async === true; + $format = $format.validate; + } + }} + {{? $formatType != $ruleType }} + {{# def.skipFormat }} + {{?}} + {{? 
$async }} + {{ + if (!it.async) throw new Error('async format in sync schema'); + var $formatRef = 'formats' + it.util.getProperty($schema) + '.validate'; + }} + if (!(await {{=$formatRef}}({{=$data}}))) { + {{??}} + if (!{{# def.checkFormat }}) { + {{?}} +{{?}} + {{# def.error:'format' }} + } {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/if.jst b/src/node_modules/ajv/lib/dot/if.jst new file mode 100644 index 0000000..7ccc9b7 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/if.jst @@ -0,0 +1,75 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + + +{{## def.validateIfClause:_clause: + {{ + $it.schema = it.schema['_clause']; + $it.schemaPath = it.schemaPath + '._clause'; + $it.errSchemaPath = it.errSchemaPath + '/_clause'; + }} + {{# def.insertSubschemaCode }} + {{=$valid}} = {{=$nextValid}}; + {{? $thenPresent && $elsePresent }} + {{ $ifClause = 'ifClause' + $lvl; }} + var {{=$ifClause}} = '_clause'; + {{??}} + {{ $ifClause = '\'_clause\''; }} + {{?}} +#}} + +{{ + var $thenSch = it.schema['then'] + , $elseSch = it.schema['else'] + , $thenPresent = $thenSch !== undefined && {{# def.nonEmptySchema:$thenSch }} + , $elsePresent = $elseSch !== undefined && {{# def.nonEmptySchema:$elseSch }} + , $currentBaseId = $it.baseId; +}} + +{{? $thenPresent || $elsePresent }} + {{ + var $ifClause; + $it.createErrors = false; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + }} + var {{=$errs}} = errors; + var {{=$valid}} = true; + + {{# def.setCompositeRule }} + {{# def.insertSubschemaCode }} + {{ $it.createErrors = true; }} + {{# def.resetErrors }} + {{# def.resetCompositeRule }} + + {{? $thenPresent }} + if ({{=$nextValid}}) { + {{# def.validateIfClause:then }} + } + {{? $elsePresent }} + else { + {{?}} + {{??}} + if (!{{=$nextValid}}) { + {{?}} + + {{? 
$elsePresent }} + {{# def.validateIfClause:else }} + } + {{?}} + + if (!{{=$valid}}) { + {{# def.extraError:'if' }} + } + {{? $breakOnError }} else { {{?}} + + {{# def.cleanUp }} +{{??}} + {{? $breakOnError }} + if (true) { + {{?}} +{{?}} + diff --git a/src/node_modules/ajv/lib/dot/items.jst b/src/node_modules/ajv/lib/dot/items.jst new file mode 100644 index 0000000..8c0f5ac --- /dev/null +++ b/src/node_modules/ajv/lib/dot/items.jst @@ -0,0 +1,100 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + + +{{## def.validateItems:startFrom: + for (var {{=$idx}} = {{=startFrom}}; {{=$idx}} < {{=$data}}.length; {{=$idx}}++) { + {{ + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + }} + + {{# def.generateSubschemaCode }} + {{# def.optimizeValidate }} + + {{? $breakOnError }} + if (!{{=$nextValid}}) break; + {{?}} + } +#}} + +{{ + var $idx = 'i' + $lvl + , $dataNxt = $it.dataLevel = it.dataLevel + 1 + , $nextData = 'data' + $dataNxt + , $currentBaseId = it.baseId; +}} + +var {{=$errs}} = errors; +var {{=$valid}}; + +{{? Array.isArray($schema) }} + {{ /* 'items' is an array of schemas */}} + {{ var $additionalItems = it.schema.additionalItems; }} + {{? $additionalItems === false }} + {{=$valid}} = {{=$data}}.length <= {{= $schema.length }}; + {{ + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalItems'; + }} + {{# def.checkError:'additionalItems' }} + {{ $errSchemaPath = $currErrSchemaPath; }} + {{# def.elseIfValid}} + {{?}} + + {{~ $schema:$sch:$i }} + {{? 
{{# def.nonEmptySchema:$sch }} }} + {{=$nextValid}} = true; + + if ({{=$data}}.length > {{=$i}}) { + {{ + var $passData = $data + '[' + $i + ']'; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + $it.errorPath = it.util.getPathExpr(it.errorPath, $i, it.opts.jsonPointers, true); + $it.dataPathArr[$dataNxt] = $i; + }} + + {{# def.generateSubschemaCode }} + {{# def.optimizeValidate }} + } + + {{# def.ifResultValid }} + {{?}} + {{~}} + + {{? typeof $additionalItems == 'object' && {{# def.nonEmptySchema:$additionalItems }} }} + {{ + $it.schema = $additionalItems; + $it.schemaPath = it.schemaPath + '.additionalItems'; + $it.errSchemaPath = it.errSchemaPath + '/additionalItems'; + }} + {{=$nextValid}} = true; + + if ({{=$data}}.length > {{= $schema.length }}) { + {{# def.validateItems: $schema.length }} + } + + {{# def.ifResultValid }} + {{?}} + +{{?? {{# def.nonEmptySchema:$schema }} }} + {{ /* 'items' is a single schema */}} + {{ + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + }} + {{# def.validateItems: 0 }} +{{?}} + +{{? $breakOnError }} + {{= $closingBraces }} + if ({{=$errs}} == errors) { +{{?}} + +{{# def.cleanUp }} diff --git a/src/node_modules/ajv/lib/dot/missing.def b/src/node_modules/ajv/lib/dot/missing.def new file mode 100644 index 0000000..a73b9f9 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/missing.def @@ -0,0 +1,39 @@ +{{## def.checkMissingProperty:_properties: + {{~ _properties:$propertyKey:$i }} + {{?$i}} || {{?}} + {{ + var $prop = it.util.getProperty($propertyKey) + , $useData = $data + $prop; + }} + ( ({{# def.noPropertyInData }}) && (missing{{=$lvl}} = {{= it.util.toQuotedString(it.opts.jsonPointers ? 
$propertyKey : $prop) }}) ) + {{~}} +#}} + + +{{## def.errorMissingProperty:_error: + {{ + var $propertyPath = 'missing' + $lvl + , $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.opts.jsonPointers + ? it.util.getPathExpr($currentErrorPath, $propertyPath, true) + : $currentErrorPath + ' + ' + $propertyPath; + } + }} + {{# def.error:_error }} +#}} + + +{{## def.allErrorsMissingProperty:_error: + {{ + var $prop = it.util.getProperty($propertyKey) + , $missingProperty = it.util.escapeQuotes($propertyKey) + , $useData = $data + $prop; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + }} + if ({{# def.noPropertyInData }}) { + {{# def.addError:_error }} + } +#}} diff --git a/src/node_modules/ajv/lib/dot/multipleOf.jst b/src/node_modules/ajv/lib/dot/multipleOf.jst new file mode 100644 index 0000000..5f8dd33 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/multipleOf.jst @@ -0,0 +1,20 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +var division{{=$lvl}}; +if ({{?$isData}} + {{=$schemaValue}} !== undefined && ( + typeof {{=$schemaValue}} != 'number' || + {{?}} + (division{{=$lvl}} = {{=$data}} / {{=$schemaValue}}, + {{? it.opts.multipleOfPrecision }} + Math.abs(Math.round(division{{=$lvl}}) - division{{=$lvl}}) > 1e-{{=it.opts.multipleOfPrecision}} + {{??}} + division{{=$lvl}} !== parseInt(division{{=$lvl}}) + {{?}} + ) + {{?$isData}} ) {{?}} ) { + {{# def.error:'multipleOf' }} +} {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/not.jst b/src/node_modules/ajv/lib/dot/not.jst new file mode 100644 index 0000000..e03185a --- /dev/null +++ b/src/node_modules/ajv/lib/dot/not.jst @@ -0,0 +1,43 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + +{{? 
{{# def.nonEmptySchema:$schema }} }} + {{ + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + }} + + var {{=$errs}} = errors; + + {{# def.setCompositeRule }} + + {{ + $it.createErrors = false; + var $allErrorsOption; + if ($it.opts.allErrors) { + $allErrorsOption = $it.opts.allErrors; + $it.opts.allErrors = false; + } + }} + {{= it.validate($it) }} + {{ + $it.createErrors = true; + if ($allErrorsOption) $it.opts.allErrors = $allErrorsOption; + }} + + {{# def.resetCompositeRule }} + + if ({{=$nextValid}}) { + {{# def.error:'not' }} + } else { + {{# def.resetErrors }} + {{? it.opts.allErrors }} } {{?}} +{{??}} + {{# def.addError:'not' }} + {{? $breakOnError}} + if (false) { + {{?}} +{{?}} diff --git a/src/node_modules/ajv/lib/dot/oneOf.jst b/src/node_modules/ajv/lib/dot/oneOf.jst new file mode 100644 index 0000000..bcce2c6 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/oneOf.jst @@ -0,0 +1,54 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + +{{ + var $currentBaseId = $it.baseId + , $prevValid = 'prevValid' + $lvl + , $passingSchemas = 'passingSchemas' + $lvl; +}} + +var {{=$errs}} = errors + , {{=$prevValid}} = false + , {{=$valid}} = false + , {{=$passingSchemas}} = null; + +{{# def.setCompositeRule }} + +{{~ $schema:$sch:$i }} + {{? {{# def.nonEmptySchema:$sch }} }} + {{ + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + }} + + {{# def.insertSubschemaCode }} + {{??}} + var {{=$nextValid}} = true; + {{?}} + + {{? 
$i }} + if ({{=$nextValid}} && {{=$prevValid}}) { + {{=$valid}} = false; + {{=$passingSchemas}} = [{{=$passingSchemas}}, {{=$i}}]; + } else { + {{ $closingBraces += '}'; }} + {{?}} + + if ({{=$nextValid}}) { + {{=$valid}} = {{=$prevValid}} = true; + {{=$passingSchemas}} = {{=$i}}; + } +{{~}} + +{{# def.resetCompositeRule }} + +{{= $closingBraces }} + +if (!{{=$valid}}) { + {{# def.extraError:'oneOf' }} +} else { + {{# def.resetErrors }} +{{? it.opts.allErrors }} } {{?}} diff --git a/src/node_modules/ajv/lib/dot/pattern.jst b/src/node_modules/ajv/lib/dot/pattern.jst new file mode 100644 index 0000000..3a37ef6 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/pattern.jst @@ -0,0 +1,14 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + +{{ + var $regexp = $isData + ? '(new RegExp(' + $schemaValue + '))' + : it.usePattern($schema); +}} + +if ({{# def.$dataNotType:'string' }} !{{=$regexp}}.test({{=$data}}) ) { + {{# def.error:'pattern' }} +} {{? $breakOnError }} else { {{?}} diff --git a/src/node_modules/ajv/lib/dot/properties.jst b/src/node_modules/ajv/lib/dot/properties.jst new file mode 100644 index 0000000..862067e --- /dev/null +++ b/src/node_modules/ajv/lib/dot/properties.jst @@ -0,0 +1,244 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + + +{{## def.validateAdditional: + {{ /* additionalProperties is schema */ + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty + ? 
it.errorPath + : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + }} + + {{# def.generateSubschemaCode }} + {{# def.optimizeValidate }} +#}} + + +{{ + var $key = 'key' + $lvl + , $idx = 'idx' + $lvl + , $dataNxt = $it.dataLevel = it.dataLevel + 1 + , $nextData = 'data' + $dataNxt + , $dataProperties = 'dataProperties' + $lvl; + + var $schemaKeys = Object.keys($schema || {}) + , $pProperties = it.schema.patternProperties || {} + , $pPropertyKeys = Object.keys($pProperties) + , $aProperties = it.schema.additionalProperties + , $someProperties = $schemaKeys.length || $pPropertyKeys.length + , $noAdditional = $aProperties === false + , $additionalIsSchema = typeof $aProperties == 'object' + && Object.keys($aProperties).length + , $removeAdditional = it.opts.removeAdditional + , $checkAdditional = $noAdditional || $additionalIsSchema || $removeAdditional + , $ownProperties = it.opts.ownProperties + , $currentBaseId = it.baseId; + + var $required = it.schema.required; + if ($required && !(it.opts.$data && $required.$data) && $required.length < it.opts.loopRequired) + var $requiredHash = it.util.toHash($required); +}} + + +var {{=$errs}} = errors; +var {{=$nextValid}} = true; +{{? $ownProperties }} + var {{=$dataProperties}} = undefined; +{{?}} + +{{? $checkAdditional }} + {{# def.iterateProperties }} + {{? $someProperties }} + var isAdditional{{=$lvl}} = !(false + {{? $schemaKeys.length }} + {{? $schemaKeys.length > 8 }} + || validate.schema{{=$schemaPath}}.hasOwnProperty({{=$key}}) + {{??}} + {{~ $schemaKeys:$propertyKey }} + || {{=$key}} == {{= it.util.toQuotedString($propertyKey) }} + {{~}} + {{?}} + {{?}} + {{? $pPropertyKeys.length }} + {{~ $pPropertyKeys:$pProperty:$i }} + || {{= it.usePattern($pProperty) }}.test({{=$key}}) + {{~}} + {{?}} + ); + + if (isAdditional{{=$lvl}}) { + {{?}} + {{? 
$removeAdditional == 'all' }} + delete {{=$data}}[{{=$key}}]; + {{??}} + {{ + var $currentErrorPath = it.errorPath; + var $additionalProperty = '\' + ' + $key + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + } + }} + {{? $noAdditional }} + {{? $removeAdditional }} + delete {{=$data}}[{{=$key}}]; + {{??}} + {{=$nextValid}} = false; + {{ + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalProperties'; + }} + {{# def.error:'additionalProperties' }} + {{ $errSchemaPath = $currErrSchemaPath; }} + {{? $breakOnError }} break; {{?}} + {{?}} + {{?? $additionalIsSchema }} + {{? $removeAdditional == 'failing' }} + var {{=$errs}} = errors; + {{# def.setCompositeRule }} + + {{# def.validateAdditional }} + + if (!{{=$nextValid}}) { + errors = {{=$errs}}; + if (validate.errors !== null) { + if (errors) validate.errors.length = errors; + else validate.errors = null; + } + delete {{=$data}}[{{=$key}}]; + } + + {{# def.resetCompositeRule }} + {{??}} + {{# def.validateAdditional }} + {{? $breakOnError }} if (!{{=$nextValid}}) break; {{?}} + {{?}} + {{?}} + {{ it.errorPath = $currentErrorPath; }} + {{?}} + {{? $someProperties }} + } + {{?}} + } + + {{# def.ifResultValid }} +{{?}} + +{{ var $useDefaults = it.opts.useDefaults && !it.compositeRule; }} + +{{? $schemaKeys.length }} + {{~ $schemaKeys:$propertyKey }} + {{ var $sch = $schema[$propertyKey]; }} + + {{? 
{{# def.nonEmptySchema:$sch}} }} + {{ + var $prop = it.util.getProperty($propertyKey) + , $passData = $data + $prop + , $hasDefault = $useDefaults && $sch.default !== undefined; + $it.schema = $sch; + $it.schemaPath = $schemaPath + $prop; + $it.errSchemaPath = $errSchemaPath + '/' + it.util.escapeFragment($propertyKey); + $it.errorPath = it.util.getPath(it.errorPath, $propertyKey, it.opts.jsonPointers); + $it.dataPathArr[$dataNxt] = it.util.toQuotedString($propertyKey); + }} + + {{# def.generateSubschemaCode }} + + {{? {{# def.willOptimize }} }} + {{ + $code = {{# def._optimizeValidate }}; + var $useData = $passData; + }} + {{??}} + {{ var $useData = $nextData; }} + var {{=$nextData}} = {{=$passData}}; + {{?}} + + {{? $hasDefault }} + {{= $code }} + {{??}} + {{? $requiredHash && $requiredHash[$propertyKey] }} + if ({{# def.noPropertyInData }}) { + {{=$nextValid}} = false; + {{ + var $currentErrorPath = it.errorPath + , $currErrSchemaPath = $errSchemaPath + , $missingProperty = it.util.escapeQuotes($propertyKey); + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + $errSchemaPath = it.errSchemaPath + '/required'; + }} + {{# def.error:'required' }} + {{ $errSchemaPath = $currErrSchemaPath; }} + {{ it.errorPath = $currentErrorPath; }} + } else { + {{??}} + {{? $breakOnError }} + if ({{# def.noPropertyInData }}) { + {{=$nextValid}} = true; + } else { + {{??}} + if ({{=$useData}} !== undefined + {{? $ownProperties }} + && {{# def.isOwnProperty }} + {{?}} + ) { + {{?}} + {{?}} + + {{= $code }} + } + {{?}} {{ /* $hasDefault */ }} + {{?}} {{ /* def.nonEmptySchema */ }} + + {{# def.ifResultValid }} + {{~}} +{{?}} + +{{? $pPropertyKeys.length }} + {{~ $pPropertyKeys:$pProperty }} + {{ var $sch = $pProperties[$pProperty]; }} + + {{? 
{{# def.nonEmptySchema:$sch}} }} + {{ + $it.schema = $sch; + $it.schemaPath = it.schemaPath + '.patternProperties' + it.util.getProperty($pProperty); + $it.errSchemaPath = it.errSchemaPath + '/patternProperties/' + + it.util.escapeFragment($pProperty); + }} + + {{# def.iterateProperties }} + if ({{= it.usePattern($pProperty) }}.test({{=$key}})) { + {{ + $it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + }} + + {{# def.generateSubschemaCode }} + {{# def.optimizeValidate }} + + {{? $breakOnError }} if (!{{=$nextValid}}) break; {{?}} + } + {{? $breakOnError }} else {{=$nextValid}} = true; {{?}} + } + + {{# def.ifResultValid }} + {{?}} {{ /* def.nonEmptySchema */ }} + {{~}} +{{?}} + + +{{? $breakOnError }} + {{= $closingBraces }} + if ({{=$errs}} == errors) { +{{?}} + +{{# def.cleanUp }} diff --git a/src/node_modules/ajv/lib/dot/propertyNames.jst b/src/node_modules/ajv/lib/dot/propertyNames.jst new file mode 100644 index 0000000..ee52b21 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/propertyNames.jst @@ -0,0 +1,54 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.setupNextLevel }} + +var {{=$errs}} = errors; + +{{? {{# def.nonEmptySchema:$schema }} }} + {{ + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + }} + + {{ + var $key = 'key' + $lvl + , $idx = 'idx' + $lvl + , $i = 'i' + $lvl + , $invalidName = '\' + ' + $key + ' + \'' + , $dataNxt = $it.dataLevel = it.dataLevel + 1 + , $nextData = 'data' + $dataNxt + , $dataProperties = 'dataProperties' + $lvl + , $ownProperties = it.opts.ownProperties + , $currentBaseId = it.baseId; + }} + + {{? 
$ownProperties }} + var {{=$dataProperties}} = undefined; + {{?}} + {{# def.iterateProperties }} + var startErrs{{=$lvl}} = errors; + + {{ var $passData = $key; }} + {{# def.setCompositeRule }} + {{# def.generateSubschemaCode }} + {{# def.optimizeValidate }} + {{# def.resetCompositeRule }} + + if (!{{=$nextValid}}) { + for (var {{=$i}}=startErrs{{=$lvl}}; {{=$i}}= it.opts.loopRequired + , $ownProperties = it.opts.ownProperties; + }} + + {{? $breakOnError }} + var missing{{=$lvl}}; + {{? $loopRequired }} + {{# def.setupLoop }} + var {{=$valid}} = true; + + {{?$isData}}{{# def.check$dataIsArray }}{{?}} + + for (var {{=$i}} = 0; {{=$i}} < {{=$vSchema}}.length; {{=$i}}++) { + {{=$valid}} = {{=$data}}[{{=$vSchema}}[{{=$i}}]] !== undefined + {{? $ownProperties }} + && {{# def.isRequiredOwnProperty }} + {{?}}; + if (!{{=$valid}}) break; + } + + {{? $isData }} } {{?}} + + {{# def.checkError:'required' }} + else { + {{??}} + if ({{# def.checkMissingProperty:$required }}) { + {{# def.errorMissingProperty:'required' }} + } else { + {{?}} + {{??}} + {{? $loopRequired }} + {{# def.setupLoop }} + {{? $isData }} + if ({{=$vSchema}} && !Array.isArray({{=$vSchema}})) { + {{# def.addError:'required' }} + } else if ({{=$vSchema}} !== undefined) { + {{?}} + + for (var {{=$i}} = 0; {{=$i}} < {{=$vSchema}}.length; {{=$i}}++) { + if ({{=$data}}[{{=$vSchema}}[{{=$i}}]] === undefined + {{? $ownProperties }} + || !{{# def.isRequiredOwnProperty }} + {{?}}) { + {{# def.addError:'required' }} + } + } + + {{? $isData }} } {{?}} + {{??}} + {{~ $required:$propertyKey }} + {{# def.allErrorsMissingProperty:'required' }} + {{~}} + {{?}} + {{?}} + + {{ it.errorPath = $currentErrorPath; }} + +{{?? 
$breakOnError }} + if (true) { +{{?}} diff --git a/src/node_modules/ajv/lib/dot/uniqueItems.jst b/src/node_modules/ajv/lib/dot/uniqueItems.jst new file mode 100644 index 0000000..22f82f9 --- /dev/null +++ b/src/node_modules/ajv/lib/dot/uniqueItems.jst @@ -0,0 +1,62 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.setupKeyword }} +{{# def.$data }} + + +{{? ($schema || $isData) && it.opts.uniqueItems !== false }} + {{? $isData }} + var {{=$valid}}; + if ({{=$schemaValue}} === false || {{=$schemaValue}} === undefined) + {{=$valid}} = true; + else if (typeof {{=$schemaValue}} != 'boolean') + {{=$valid}} = false; + else { + {{?}} + + var i = {{=$data}}.length + , {{=$valid}} = true + , j; + if (i > 1) { + {{ + var $itemType = it.schema.items && it.schema.items.type + , $typeIsArray = Array.isArray($itemType); + }} + {{? !$itemType || $itemType == 'object' || $itemType == 'array' || + ($typeIsArray && ($itemType.indexOf('object') >= 0 || $itemType.indexOf('array') >= 0)) }} + outer: + for (;i--;) { + for (j = i; j--;) { + if (equal({{=$data}}[i], {{=$data}}[j])) { + {{=$valid}} = false; + break outer; + } + } + } + {{??}} + var itemIndices = {}, item; + for (;i--;) { + var item = {{=$data}}[i]; + {{ var $method = 'checkDataType' + ($typeIsArray ? 's' : ''); }} + if ({{= it.util[$method]($itemType, 'item', true) }}) continue; + {{? $typeIsArray}} + if (typeof item == 'string') item = '"' + item; + {{?}} + if (typeof itemIndices[item] == 'number') { + {{=$valid}} = false; + j = itemIndices[item]; + break; + } + itemIndices[item] = i; + } + {{?}} + } + + {{? $isData }} } {{?}} + + if (!{{=$valid}}) { + {{# def.error:'uniqueItems' }} + } {{? $breakOnError }} else { {{?}} +{{??}} + {{? 
$breakOnError }} if (true) { {{?}} +{{?}} diff --git a/src/node_modules/ajv/lib/dot/validate.jst b/src/node_modules/ajv/lib/dot/validate.jst new file mode 100644 index 0000000..f8a1edf --- /dev/null +++ b/src/node_modules/ajv/lib/dot/validate.jst @@ -0,0 +1,282 @@ +{{# def.definitions }} +{{# def.errors }} +{{# def.defaults }} +{{# def.coerce }} + +{{ /** + * schema compilation (render) time: + * it = { schema, RULES, _validate, opts } + * it.validate - this template function, + * it is used recursively to generate code for subschemas + * + * runtime: + * "validate" is a variable name to which this function will be assigned + * validateRef etc. are defined in the parent scope in index.js + */ }} + +{{ + var $async = it.schema.$async === true + , $refKeywords = it.util.schemaHasRulesExcept(it.schema, it.RULES.all, '$ref') + , $id = it.self._getId(it.schema); +}} + +{{ + if (it.opts.strictKeywords) { + var $unknownKwd = it.util.schemaUnknownRules(it.schema, it.RULES.keywords); + if ($unknownKwd) { + var $keywordsMsg = 'unknown keyword: ' + $unknownKwd; + if (it.opts.strictKeywords === 'log') it.logger.warn($keywordsMsg); + else throw new Error($keywordsMsg); + } + } +}} + +{{? it.isTop }} + var validate = {{?$async}}{{it.async = true;}}async {{?}}function(data, dataPath, parentData, parentDataProperty, rootData) { + 'use strict'; + {{? $id && (it.opts.sourceCode || it.opts.processCode) }} + {{= '/\*# sourceURL=' + $id + ' */' }} + {{?}} +{{?}} + +{{? typeof it.schema == 'boolean' || !($refKeywords || it.schema.$ref) }} + {{ var $keyword = 'false schema'; }} + {{# def.setupKeyword }} + {{? it.schema === false}} + {{? it.isTop}} + {{ $breakOnError = true; }} + {{??}} + var {{=$valid}} = false; + {{?}} + {{# def.error:'false schema' }} + {{??}} + {{? it.isTop}} + {{? $async }} + return data; + {{??}} + validate.errors = null; + return true; + {{?}} + {{??}} + var {{=$valid}} = true; + {{?}} + {{?}} + + {{? 
it.isTop}} + }; + return validate; + {{?}} + + {{ return out; }} +{{?}} + + +{{? it.isTop }} + {{ + var $top = it.isTop + , $lvl = it.level = 0 + , $dataLvl = it.dataLevel = 0 + , $data = 'data'; + it.rootId = it.resolve.fullPath(it.self._getId(it.root.schema)); + it.baseId = it.baseId || it.rootId; + delete it.isTop; + + it.dataPathArr = [undefined]; + + if (it.schema.default !== undefined && it.opts.useDefaults && it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored in the schema root'; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + }} + + var vErrors = null; {{ /* don't edit, used in replace */ }} + var errors = 0; {{ /* don't edit, used in replace */ }} + if (rootData === undefined) rootData = data; {{ /* don't edit, used in replace */ }} +{{??}} + {{ + var $lvl = it.level + , $dataLvl = it.dataLevel + , $data = 'data' + ($dataLvl || ''); + + if ($id) it.baseId = it.resolve.url(it.baseId, $id); + + if ($async && !it.async) throw new Error('async schema in sync schema'); + }} + + var errs_{{=$lvl}} = errors; +{{?}} + +{{ + var $valid = 'valid' + $lvl + , $breakOnError = !it.opts.allErrors + , $closingBraces1 = '' + , $closingBraces2 = ''; + + var $errorKeyword; + var $typeSchema = it.schema.type + , $typeIsArray = Array.isArray($typeSchema); + + if ($typeSchema && it.opts.nullable && it.schema.nullable === true) { + if ($typeIsArray) { + if ($typeSchema.indexOf('null') == -1) + $typeSchema = $typeSchema.concat('null'); + } else if ($typeSchema != 'null') { + $typeSchema = [$typeSchema, 'null']; + $typeIsArray = true; + } + } + + if ($typeIsArray && $typeSchema.length == 1) { + $typeSchema = $typeSchema[0]; + $typeIsArray = false; + } +}} + +{{## def.checkType: + {{ + var $schemaPath = it.schemaPath + '.type' + , $errSchemaPath = it.errSchemaPath + '/type' + , $method = $typeIsArray ? 
'checkDataTypes' : 'checkDataType'; + }} + + if ({{= it.util[$method]($typeSchema, $data, true) }}) { +#}} + +{{? it.schema.$ref && $refKeywords }} + {{? it.opts.extendRefs == 'fail' }} + {{ throw new Error('$ref: validation keywords used in schema at path "' + it.errSchemaPath + '" (see option extendRefs)'); }} + {{?? it.opts.extendRefs !== true }} + {{ + $refKeywords = false; + it.logger.warn('$ref: keywords ignored in schema at path "' + it.errSchemaPath + '"'); + }} + {{?}} +{{?}} + +{{? it.schema.$comment && it.opts.$comment }} + {{= it.RULES.all.$comment.code(it, '$comment') }} +{{?}} + +{{? $typeSchema }} + {{? it.opts.coerceTypes }} + {{ var $coerceToTypes = it.util.coerceToTypes(it.opts.coerceTypes, $typeSchema); }} + {{?}} + + {{ var $rulesGroup = it.RULES.types[$typeSchema]; }} + {{? $coerceToTypes || $typeIsArray || $rulesGroup === true || + ($rulesGroup && !$shouldUseGroup($rulesGroup)) }} + {{ + var $schemaPath = it.schemaPath + '.type' + , $errSchemaPath = it.errSchemaPath + '/type'; + }} + {{# def.checkType }} + {{? $coerceToTypes }} + {{# def.coerceType }} + {{??}} + {{# def.error:'type' }} + {{?}} + } + {{?}} +{{?}} + + +{{? it.schema.$ref && !$refKeywords }} + {{= it.RULES.all.$ref.code(it, '$ref') }} + {{? $breakOnError }} + } + if (errors === {{?$top}}0{{??}}errs_{{=$lvl}}{{?}}) { + {{ $closingBraces2 += '}'; }} + {{?}} +{{??}} + {{~ it.RULES:$rulesGroup }} + {{? $shouldUseGroup($rulesGroup) }} + {{? $rulesGroup.type }} + if ({{= it.util.checkDataType($rulesGroup.type, $data) }}) { + {{?}} + {{? it.opts.useDefaults }} + {{? $rulesGroup.type == 'object' && it.schema.properties }} + {{# def.defaultProperties }} + {{?? $rulesGroup.type == 'array' && Array.isArray(it.schema.items) }} + {{# def.defaultItems }} + {{?}} + {{?}} + {{~ $rulesGroup.rules:$rule }} + {{? $shouldUseRule($rule) }} + {{ var $code = $rule.code(it, $rule.keyword, $rulesGroup.type); }} + {{? $code }} + {{= $code }} + {{? 
$breakOnError }} + {{ $closingBraces1 += '}'; }} + {{?}} + {{?}} + {{?}} + {{~}} + {{? $breakOnError }} + {{= $closingBraces1 }} + {{ $closingBraces1 = ''; }} + {{?}} + {{? $rulesGroup.type }} + } + {{? $typeSchema && $typeSchema === $rulesGroup.type && !$coerceToTypes }} + else { + {{ + var $schemaPath = it.schemaPath + '.type' + , $errSchemaPath = it.errSchemaPath + '/type'; + }} + {{# def.error:'type' }} + } + {{?}} + {{?}} + + {{? $breakOnError }} + if (errors === {{?$top}}0{{??}}errs_{{=$lvl}}{{?}}) { + {{ $closingBraces2 += '}'; }} + {{?}} + {{?}} + {{~}} +{{?}} + +{{? $breakOnError }} {{= $closingBraces2 }} {{?}} + +{{? $top }} + {{? $async }} + if (errors === 0) return data; {{ /* don't edit, used in replace */ }} + else throw new ValidationError(vErrors); {{ /* don't edit, used in replace */ }} + {{??}} + validate.errors = vErrors; {{ /* don't edit, used in replace */ }} + return errors === 0; {{ /* don't edit, used in replace */ }} + {{?}} + }; + + return validate; +{{??}} + var {{=$valid}} = errors === errs_{{=$lvl}}; +{{?}} + +{{# def.cleanUp }} + +{{? $top }} + {{# def.finalCleanUp }} +{{?}} + +{{ + function $shouldUseGroup($rulesGroup) { + var rules = $rulesGroup.rules; + for (var i=0; i < rules.length; i++) + if ($shouldUseRule(rules[i])) + return true; + } + + function $shouldUseRule($rule) { + return it.schema[$rule.keyword] !== undefined || + ($rule.implements && $ruleImplementsSomeKeyword($rule)); + } + + function $ruleImplementsSomeKeyword($rule) { + var impl = $rule.implements; + for (var i=0; i < impl.length; i++) + if (it.schema[impl[i]] !== undefined) + return true; + } +}} diff --git a/src/node_modules/ajv/lib/dotjs/README.md b/src/node_modules/ajv/lib/dotjs/README.md new file mode 100644 index 0000000..4d99484 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/README.md @@ -0,0 +1,3 @@ +These files are compiled dot templates from dot folder. + +Do NOT edit them directly, edit the templates and run `npm run build` from main ajv folder. 
diff --git a/src/node_modules/ajv/lib/dotjs/_limit.js b/src/node_modules/ajv/lib/dotjs/_limit.js new file mode 100644 index 0000000..f02a760 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/_limit.js @@ -0,0 +1,157 @@ +'use strict'; +module.exports = function generate__limit(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $isMax = $keyword == 'maximum', + $exclusiveKeyword = $isMax ? 'exclusiveMaximum' : 'exclusiveMinimum', + $schemaExcl = it.schema[$exclusiveKeyword], + $isDataExcl = it.opts.$data && $schemaExcl && $schemaExcl.$data, + $op = $isMax ? '<' : '>', + $notOp = $isMax ? 
'>' : '<', + $errorKeyword = undefined; + if ($isDataExcl) { + var $schemaValueExcl = it.util.getData($schemaExcl.$data, $dataLvl, it.dataPathArr), + $exclusive = 'exclusive' + $lvl, + $exclType = 'exclType' + $lvl, + $exclIsNumber = 'exclIsNumber' + $lvl, + $opExpr = 'op' + $lvl, + $opStr = '\' + ' + $opExpr + ' + \''; + out += ' var schemaExcl' + ($lvl) + ' = ' + ($schemaValueExcl) + '; '; + $schemaValueExcl = 'schemaExcl' + $lvl; + out += ' var ' + ($exclusive) + '; var ' + ($exclType) + ' = typeof ' + ($schemaValueExcl) + '; if (' + ($exclType) + ' != \'boolean\' && ' + ($exclType) + ' != \'undefined\' && ' + ($exclType) + ' != \'number\') { '; + var $errorKeyword = $exclusiveKeyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_exclusiveLimit') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'' + ($exclusiveKeyword) + ' should be boolean\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($exclType) + ' == \'number\' ? 
( (' + ($exclusive) + ' = ' + ($schemaValue) + ' === undefined || ' + ($schemaValueExcl) + ' ' + ($op) + '= ' + ($schemaValue) + ') ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaValueExcl) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) : ( (' + ($exclusive) + ' = ' + ($schemaValueExcl) + ' === true) ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaValue) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) || ' + ($data) + ' !== ' + ($data) + ') { var op' + ($lvl) + ' = ' + ($exclusive) + ' ? \'' + ($op) + '\' : \'' + ($op) + '=\'; '; + if ($schema === undefined) { + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaValueExcl; + $isData = $isDataExcl; + } + } else { + var $exclIsNumber = typeof $schemaExcl == 'number', + $opStr = $op; + if ($exclIsNumber && $isData) { + var $opExpr = '\'' + $opStr + '\''; + out += ' if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ( ' + ($schemaValue) + ' === undefined || ' + ($schemaExcl) + ' ' + ($op) + '= ' + ($schemaValue) + ' ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaExcl) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) || ' + ($data) + ' !== ' + ($data) + ') { '; + } else { + if ($exclIsNumber && $schema === undefined) { + $exclusive = true; + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaExcl; + $notOp += '='; + } else { + if ($exclIsNumber) $schemaValue = Math[$isMax ? 'min' : 'max']($schemaExcl, $schema); + if ($schemaExcl === ($exclIsNumber ? 
$schemaValue : true)) { + $exclusive = true; + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $notOp += '='; + } else { + $exclusive = false; + $opStr += '='; + } + } + var $opExpr = '\'' + $opStr + '\''; + out += ' if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' || ' + ($data) + ' !== ' + ($data) + ') { '; + } + } + $errorKeyword = $errorKeyword || $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limit') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { comparison: ' + ($opExpr) + ', limit: ' + ($schemaValue) + ', exclusive: ' + ($exclusive) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be ' + ($opStr) + ' '; + if ($isData) { + out += '\' + ' + ($schemaValue); + } else { + out += '' + ($schemaValue) + '\''; + } + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git 
a/src/node_modules/ajv/lib/dotjs/_limitItems.js b/src/node_modules/ajv/lib/dotjs/_limitItems.js new file mode 100644 index 0000000..a27d118 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/_limitItems.js @@ -0,0 +1,77 @@ +'use strict'; +module.exports = function generate__limitItems(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $op = $keyword == 'maxItems' ? '>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($data) + '.length ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have '; + if ($keyword == 'maxItems') { + out += 'more'; + } else { + out += 'fewer'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' items\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + 
($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/_limitLength.js b/src/node_modules/ajv/lib/dotjs/_limitLength.js new file mode 100644 index 0000000..789f374 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/_limitLength.js @@ -0,0 +1,82 @@ +'use strict'; +module.exports = function generate__limitLength(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $op = $keyword == 'maxLength' ? 
'>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + if (it.opts.unicode === false) { + out += ' ' + ($data) + '.length '; + } else { + out += ' ucs2length(' + ($data) + ') '; + } + out += ' ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitLength') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be '; + if ($keyword == 'maxLength') { + out += 'longer'; + } else { + out += 'shorter'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' characters\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/_limitProperties.js b/src/node_modules/ajv/lib/dotjs/_limitProperties.js new file mode 100644 index 0000000..11dc939 --- 
/dev/null +++ b/src/node_modules/ajv/lib/dotjs/_limitProperties.js @@ -0,0 +1,77 @@ +'use strict'; +module.exports = function generate__limitProperties(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $op = $keyword == 'maxProperties' ? '>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' Object.keys(' + ($data) + ').length ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitProperties') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have '; + if ($keyword == 'maxProperties') { + out += 'more'; + } else { + out += 'fewer'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' properties\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + 
(it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/allOf.js b/src/node_modules/ajv/lib/dotjs/allOf.js new file mode 100644 index 0000000..5107b18 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/allOf.js @@ -0,0 +1,43 @@ +'use strict'; +module.exports = function generate_allOf(it, $keyword, $ruleType) { + var out = ' '; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $currentBaseId = $it.baseId, + $allSchemasEmpty = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + $allSchemasEmpty = false; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if ($breakOnError) { + if ($allSchemasEmpty) { + out += ' if (true) { '; + } else { + out += ' ' + ($closingBraces.slice(0, -1)) + ' '; + } + } + out = it.util.cleanUpCode(out); + return out; +} diff --git 
a/src/node_modules/ajv/lib/dotjs/anyOf.js b/src/node_modules/ajv/lib/dotjs/anyOf.js new file mode 100644 index 0000000..819c6f8 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/anyOf.js @@ -0,0 +1,74 @@ +'use strict'; +module.exports = function generate_anyOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $noEmptySchema = $schema.every(function($sch) { + return it.util.schemaHasRules($sch, it.RULES.all); + }); + if ($noEmptySchema) { + var $currentBaseId = $it.baseId; + out += ' var ' + ($errs) + ' = errors; var ' + ($valid) + ' = false; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($valid) + ' || ' + ($nextValid) + '; if (!' + ($valid) + ') { '; + $closingBraces += '}'; + } + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($closingBraces) + ' if (!' 
+ ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('anyOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should match some schema in anyOf\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += ' } else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + if (it.opts.allErrors) { + out += ' } '; + } + out = it.util.cleanUpCode(out); + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/comment.js b/src/node_modules/ajv/lib/dotjs/comment.js new file mode 100644 index 0000000..dd66bb8 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/comment.js @@ -0,0 +1,14 @@ +'use strict'; +module.exports = function generate_comment(it, $keyword, $ruleType) { + var out = ' '; + var $schema = it.schema[$keyword]; + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $comment = it.util.toQuotedString($schema); + if (it.opts.$comment === true) { + out += ' console.log(' + ($comment) + ');'; + } else if (typeof it.opts.$comment == 'function') { + out += ' self._opts.$comment(' + ($comment) + ', ' + (it.util.toQuotedString($errSchemaPath)) + ', validate.root.schema);'; + } + return 
out; +} diff --git a/src/node_modules/ajv/lib/dotjs/const.js b/src/node_modules/ajv/lib/dotjs/const.js new file mode 100644 index 0000000..15b7c61 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/const.js @@ -0,0 +1,56 @@ +'use strict'; +module.exports = function generate_const(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!$isData) { + out += ' var schema' + ($lvl) + ' = validate.schema' + ($schemaPath) + ';'; + } + out += 'var ' + ($valid) + ' = equal(' + ($data) + ', schema' + ($lvl) + '); if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('const') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { allowedValue: schema' + ($lvl) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be equal to constant\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' }'; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/contains.js b/src/node_modules/ajv/lib/dotjs/contains.js new file mode 100644 index 0000000..8899ce5 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/contains.js @@ -0,0 +1,82 @@ +'use strict'; +module.exports = function generate_contains(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $idx = 'i' + $lvl, + 
$dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $currentBaseId = it.baseId, + $nonEmptySchema = it.util.schemaHasRules($schema, it.RULES.all); + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if ($nonEmptySchema) { + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($nextValid) + ' = false; for (var ' + ($idx) + ' = 0; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' if (' + ($nextValid) + ') break; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($closingBraces) + ' if (!' 
+ ($nextValid) + ') {'; + } else { + out += ' if (' + ($data) + '.length == 0) {'; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('contains') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should contain a valid item\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + if ($nonEmptySchema) { + out += ' errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + } + if (it.opts.allErrors) { + out += ' } '; + } + out = it.util.cleanUpCode(out); + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/custom.js b/src/node_modules/ajv/lib/dotjs/custom.js new file mode 100644 index 0000000..f3e641e --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/custom.js @@ -0,0 +1,228 @@ +'use strict'; +module.exports = function generate_custom(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var 
$errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $rule = this, + $definition = 'definition' + $lvl, + $rDef = $rule.definition, + $closingBraces = ''; + var $compile, $inline, $macro, $ruleValidate, $validateCode; + if ($isData && $rDef.$data) { + $validateCode = 'keywordValidate' + $lvl; + var $validateSchema = $rDef.validateSchema; + out += ' var ' + ($definition) + ' = RULES.custom[\'' + ($keyword) + '\'].definition; var ' + ($validateCode) + ' = ' + ($definition) + '.validate;'; + } else { + $ruleValidate = it.useCustomRule($rule, $schema, it.schema, it); + if (!$ruleValidate) return; + $schemaValue = 'validate.schema' + $schemaPath; + $validateCode = $ruleValidate.code; + $compile = $rDef.compile; + $inline = $rDef.inline; + $macro = $rDef.macro; + } + var $ruleErrs = $validateCode + '.errors', + $i = 'i' + $lvl, + $ruleErr = 'ruleErr' + $lvl, + $asyncKeyword = $rDef.async; + if ($asyncKeyword && !it.async) throw new Error('async keyword in sync schema'); + if (!($inline || $macro)) { + out += '' + ($ruleErrs) + ' = null;'; + } + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if ($isData && $rDef.$data) { + $closingBraces += '}'; + out += ' if (' + ($schemaValue) + ' === undefined) { ' + ($valid) + ' = true; } else { '; + if ($validateSchema) { + $closingBraces += '}'; + out += ' ' + ($valid) + ' = ' + ($definition) + '.validateSchema(' + ($schemaValue) + '); if (' + ($valid) + ') { '; + } + } + if ($inline) { + if ($rDef.statements) { + out += ' ' + ($ruleValidate.validate) + ' '; + } else { + out += ' ' + ($valid) + ' = ' + ($ruleValidate.validate) + '; '; + } + } else if ($macro) { + var $it = 
it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + $it.schema = $ruleValidate.validate; + $it.schemaPath = ''; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var $code = it.validate($it).replace(/validate\.schema/g, $validateCode); + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($code); + } else { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + out += ' ' + ($validateCode) + '.call( '; + if (it.opts.passContext) { + out += 'this'; + } else { + out += 'self'; + } + if ($compile || $rDef.schema === false) { + out += ' , ' + ($data) + ' '; + } else { + out += ' , ' + ($schemaValue) + ' , ' + ($data) + ' , validate.schema' + (it.schemaPath) + ' '; + } + out += ' , (dataPath || \'\')'; + if (it.errorPath != '""') { + out += ' + ' + (it.errorPath); + } + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? 
it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' , ' + ($parentData) + ' , ' + ($parentDataProperty) + ' , rootData ) '; + var def_callRuleValidate = out; + out = $$outStack.pop(); + if ($rDef.errors === false) { + out += ' ' + ($valid) + ' = '; + if ($asyncKeyword) { + out += 'await '; + } + out += '' + (def_callRuleValidate) + '; '; + } else { + if ($asyncKeyword) { + $ruleErrs = 'customErrors' + $lvl; + out += ' var ' + ($ruleErrs) + ' = null; try { ' + ($valid) + ' = await ' + (def_callRuleValidate) + '; } catch (e) { ' + ($valid) + ' = false; if (e instanceof ValidationError) ' + ($ruleErrs) + ' = e.errors; else throw e; } '; + } else { + out += ' ' + ($ruleErrs) + ' = null; ' + ($valid) + ' = ' + (def_callRuleValidate) + '; '; + } + } + } + if ($rDef.modifying) { + out += ' if (' + ($parentData) + ') ' + ($data) + ' = ' + ($parentData) + '[' + ($parentDataProperty) + '];'; + } + out += '' + ($closingBraces); + if ($rDef.valid) { + if ($breakOnError) { + out += ' if (true) { '; + } + } else { + out += ' if ( '; + if ($rDef.valid === undefined) { + out += ' !'; + if ($macro) { + out += '' + ($nextValid); + } else { + out += '' + ($valid); + } + } else { + out += ' ' + (!$rDef.valid) + ' '; + } + out += ') { '; + $errorKeyword = $rule.keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'custom') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { keyword: \'' + ($rule.keyword) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should pass "' + ($rule.keyword) + '" keyword validation\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + 
($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + var def_customError = out; + out = $$outStack.pop(); + if ($inline) { + if ($rDef.errors) { + if ($rDef.errors != 'full') { + out += ' for (var ' + ($i) + '=' + ($errs) + '; ' + ($i) + '= 0) { + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } else { + throw new Error('unknown format "' + $schema + '" is used in schema at path "' + it.errSchemaPath + '"'); + } + } + var $isObject = typeof $format == 'object' && !($format instanceof RegExp) && $format.validate; + var $formatType = $isObject && $format.type || 'string'; + if ($isObject) { + var $async = $format.async === true; + $format = $format.validate; + } + if ($formatType != $ruleType) { + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } + if ($async) { + if (!it.async) throw new Error('async format in sync schema'); + var $formatRef = 'formats' + it.util.getProperty($schema) + '.validate'; + out += ' if (!(await ' + ($formatRef) + '(' + ($data) + '))) { '; + } else { + out += ' if (! 
'; + var $formatRef = 'formats' + it.util.getProperty($schema); + if ($isObject) $formatRef += '.validate'; + if (typeof $format == 'function') { + out += ' ' + ($formatRef) + '(' + ($data) + ') '; + } else { + out += ' ' + ($formatRef) + '.test(' + ($data) + ') '; + } + out += ') { '; + } + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('format') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { format: '; + if ($isData) { + out += '' + ($schemaValue); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match format "'; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + (it.util.escapeQuotes($schema)); + } + out += '"\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/if.js b/src/node_modules/ajv/lib/dotjs/if.js new file mode 100644 index 0000000..eff9090 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/if.js @@ -0,0 +1,104 @@ +'use strict'; 
+module.exports = function generate_if(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + var $thenSch = it.schema['then'], + $elseSch = it.schema['else'], + $thenPresent = $thenSch !== undefined && it.util.schemaHasRules($thenSch, it.RULES.all), + $elsePresent = $elseSch !== undefined && it.util.schemaHasRules($elseSch, it.RULES.all), + $currentBaseId = $it.baseId; + if ($thenPresent || $elsePresent) { + var $ifClause; + $it.createErrors = false; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($errs) + ' = errors; var ' + ($valid) + ' = true; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + $it.createErrors = true; + out += ' errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + if ($thenPresent) { + out += ' if (' + ($nextValid) + ') { '; + $it.schema = it.schema['then']; + $it.schemaPath = it.schemaPath + '.then'; + $it.errSchemaPath = it.errSchemaPath + '/then'; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($nextValid) + '; '; + if ($thenPresent && $elsePresent) { + $ifClause = 'ifClause' + $lvl; + out += ' var ' + ($ifClause) + ' = \'then\'; '; + } else { + $ifClause = '\'then\''; + } + out += ' } '; + if ($elsePresent) { + out += ' else { '; + } + } else { 
+ out += ' if (!' + ($nextValid) + ') { '; + } + if ($elsePresent) { + $it.schema = it.schema['else']; + $it.schemaPath = it.schemaPath + '.else'; + $it.errSchemaPath = it.errSchemaPath + '/else'; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($nextValid) + '; '; + if ($thenPresent && $elsePresent) { + $ifClause = 'ifClause' + $lvl; + out += ' var ' + ($ifClause) + ' = \'else\'; '; + } else { + $ifClause = '\'else\''; + } + out += ' } '; + } + out += ' if (!' + ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('if') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { failingKeyword: ' + ($ifClause) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match "\' + ' + ($ifClause) + ' + \'" schema\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + out = it.util.cleanUpCode(out); + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/index.js b/src/node_modules/ajv/lib/dotjs/index.js new file mode 100644 index 0000000..2fb1b00 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/index.js @@ -0,0 +1,33 @@ +'use strict'; + +//all requires must be explicit because browserify won't work with dynamic requires +module.exports = { + '$ref': 
require('./ref'), + allOf: require('./allOf'), + anyOf: require('./anyOf'), + '$comment': require('./comment'), + const: require('./const'), + contains: require('./contains'), + dependencies: require('./dependencies'), + 'enum': require('./enum'), + format: require('./format'), + 'if': require('./if'), + items: require('./items'), + maximum: require('./_limit'), + minimum: require('./_limit'), + maxItems: require('./_limitItems'), + minItems: require('./_limitItems'), + maxLength: require('./_limitLength'), + minLength: require('./_limitLength'), + maxProperties: require('./_limitProperties'), + minProperties: require('./_limitProperties'), + multipleOf: require('./multipleOf'), + not: require('./not'), + oneOf: require('./oneOf'), + pattern: require('./pattern'), + properties: require('./properties'), + propertyNames: require('./propertyNames'), + required: require('./required'), + uniqueItems: require('./uniqueItems'), + validate: require('./validate') +}; diff --git a/src/node_modules/ajv/lib/dotjs/items.js b/src/node_modules/ajv/lib/dotjs/items.js new file mode 100644 index 0000000..99ce738 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/items.js @@ -0,0 +1,141 @@ +'use strict'; +module.exports = function generate_items(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $idx = 'i' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $currentBaseId = it.baseId; + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if (Array.isArray($schema)) { + var 
$additionalItems = it.schema.additionalItems; + if ($additionalItems === false) { + out += ' ' + ($valid) + ' = ' + ($data) + '.length <= ' + ($schema.length) + '; '; + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalItems'; + out += ' if (!' + ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('additionalItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schema.length) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have more than ' + ($schema.length) + ' items\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + $errSchemaPath = $currErrSchemaPath; + if ($breakOnError) { + $closingBraces += '}'; + out += ' else { '; + } + } + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + out += ' ' + ($nextValid) + ' = true; if (' + ($data) + '.length > ' + ($i) + ') { '; + var $passData = $data + '[' + $i + ']'; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + $it.errorPath = it.util.getPathExpr(it.errorPath, $i, 
it.opts.jsonPointers, true); + $it.dataPathArr[$dataNxt] = $i; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if (typeof $additionalItems == 'object' && it.util.schemaHasRules($additionalItems, it.RULES.all)) { + $it.schema = $additionalItems; + $it.schemaPath = it.schemaPath + '.additionalItems'; + $it.errSchemaPath = it.errSchemaPath + '/additionalItems'; + out += ' ' + ($nextValid) + ' = true; if (' + ($data) + '.length > ' + ($schema.length) + ') { for (var ' + ($idx) + ' = ' + ($schema.length) + '; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' 
+ ($nextValid) + ') break; '; + } + out += ' } } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } else if (it.util.schemaHasRules($schema, it.RULES.all)) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' for (var ' + ($idx) + ' = ' + (0) + '; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + out += ' }'; + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + out = it.util.cleanUpCode(out); + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/multipleOf.js b/src/node_modules/ajv/lib/dotjs/multipleOf.js new file mode 100644 index 0000000..af087d2 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/multipleOf.js @@ -0,0 +1,77 @@ +'use strict'; +module.exports = function generate_multipleOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + 
$schemaValue = $schema; + } + out += 'var division' + ($lvl) + ';if ('; + if ($isData) { + out += ' ' + ($schemaValue) + ' !== undefined && ( typeof ' + ($schemaValue) + ' != \'number\' || '; + } + out += ' (division' + ($lvl) + ' = ' + ($data) + ' / ' + ($schemaValue) + ', '; + if (it.opts.multipleOfPrecision) { + out += ' Math.abs(Math.round(division' + ($lvl) + ') - division' + ($lvl) + ') > 1e-' + (it.opts.multipleOfPrecision) + ' '; + } else { + out += ' division' + ($lvl) + ' !== parseInt(division' + ($lvl) + ') '; + } + out += ' ) '; + if ($isData) { + out += ' ) '; + } + out += ' ) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('multipleOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { multipleOf: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be multiple of '; + if ($isData) { + out += '\' + ' + ($schemaValue); + } else { + out += '' + ($schemaValue) + '\''; + } + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/not.js 
b/src/node_modules/ajv/lib/dotjs/not.js new file mode 100644 index 0000000..c8f8af7 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/not.js @@ -0,0 +1,84 @@ +'use strict'; +module.exports = function generate_not(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + if (it.util.schemaHasRules($schema, it.RULES.all)) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($errs) + ' = errors; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.createErrors = false; + var $allErrorsOption; + if ($it.opts.allErrors) { + $allErrorsOption = $it.opts.allErrors; + $it.opts.allErrors = false; + } + out += ' ' + (it.validate($it)) + ' '; + $it.createErrors = true; + if ($allErrorsOption) $it.opts.allErrors = $allErrorsOption; + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' if (' + ($nextValid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('not') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be valid\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); 
+ if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + if (it.opts.allErrors) { + out += ' } '; + } + } else { + out += ' var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('not') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be valid\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if ($breakOnError) { + out += ' if (false) { '; + } + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/oneOf.js b/src/node_modules/ajv/lib/dotjs/oneOf.js new file mode 100644 index 0000000..e9df453 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/oneOf.js @@ -0,0 +1,73 @@ +'use strict'; +module.exports = function generate_oneOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = 
it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $currentBaseId = $it.baseId, + $prevValid = 'prevValid' + $lvl, + $passingSchemas = 'passingSchemas' + $lvl; + out += 'var ' + ($errs) + ' = errors , ' + ($prevValid) + ' = false , ' + ($valid) + ' = false , ' + ($passingSchemas) + ' = null; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + } else { + out += ' var ' + ($nextValid) + ' = true; '; + } + if ($i) { + out += ' if (' + ($nextValid) + ' && ' + ($prevValid) + ') { ' + ($valid) + ' = false; ' + ($passingSchemas) + ' = [' + ($passingSchemas) + ', ' + ($i) + ']; } else { '; + $closingBraces += '}'; + } + out += ' if (' + ($nextValid) + ') { ' + ($valid) + ' = ' + ($prevValid) + ' = true; ' + ($passingSchemas) + ' = ' + ($i) + '; }'; + } + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += '' + ($closingBraces) + 'if (!' 
+ ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('oneOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { passingSchemas: ' + ($passingSchemas) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match exactly one schema in oneOf\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += '} else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; }'; + if (it.opts.allErrors) { + out += ' } '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/pattern.js b/src/node_modules/ajv/lib/dotjs/pattern.js new file mode 100644 index 0000000..1d74d6b --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/pattern.js @@ -0,0 +1,75 @@ +'use strict'; +module.exports = function generate_pattern(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 
'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $regexp = $isData ? '(new RegExp(' + $schemaValue + '))' : it.usePattern($schema); + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'string\') || '; + } + out += ' !' + ($regexp) + '.test(' + ($data) + ') ) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('pattern') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { pattern: '; + if ($isData) { + out += '' + ($schemaValue); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match pattern "'; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + (it.util.escapeQuotes($schema)); + } + out += '"\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/properties.js b/src/node_modules/ajv/lib/dotjs/properties.js new file mode 100644 index 0000000..7d2ea86 --- /dev/null +++ 
b/src/node_modules/ajv/lib/dotjs/properties.js @@ -0,0 +1,330 @@ +'use strict'; +module.exports = function generate_properties(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $key = 'key' + $lvl, + $idx = 'idx' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $dataProperties = 'dataProperties' + $lvl; + var $schemaKeys = Object.keys($schema || {}), + $pProperties = it.schema.patternProperties || {}, + $pPropertyKeys = Object.keys($pProperties), + $aProperties = it.schema.additionalProperties, + $someProperties = $schemaKeys.length || $pPropertyKeys.length, + $noAdditional = $aProperties === false, + $additionalIsSchema = typeof $aProperties == 'object' && Object.keys($aProperties).length, + $removeAdditional = it.opts.removeAdditional, + $checkAdditional = $noAdditional || $additionalIsSchema || $removeAdditional, + $ownProperties = it.opts.ownProperties, + $currentBaseId = it.baseId; + var $required = it.schema.required; + if ($required && !(it.opts.$data && $required.$data) && $required.length < it.opts.loopRequired) var $requiredHash = it.util.toHash($required); + out += 'var ' + ($errs) + ' = errors;var ' + ($nextValid) + ' = true;'; + if ($ownProperties) { + out += ' var ' + ($dataProperties) + ' = undefined;'; + } + if ($checkAdditional) { + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + 
($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + if ($someProperties) { + out += ' var isAdditional' + ($lvl) + ' = !(false '; + if ($schemaKeys.length) { + if ($schemaKeys.length > 8) { + out += ' || validate.schema' + ($schemaPath) + '.hasOwnProperty(' + ($key) + ') '; + } else { + var arr1 = $schemaKeys; + if (arr1) { + var $propertyKey, i1 = -1, + l1 = arr1.length - 1; + while (i1 < l1) { + $propertyKey = arr1[i1 += 1]; + out += ' || ' + ($key) + ' == ' + (it.util.toQuotedString($propertyKey)) + ' '; + } + } + } + } + if ($pPropertyKeys.length) { + var arr2 = $pPropertyKeys; + if (arr2) { + var $pProperty, $i = -1, + l2 = arr2.length - 1; + while ($i < l2) { + $pProperty = arr2[$i += 1]; + out += ' || ' + (it.usePattern($pProperty)) + '.test(' + ($key) + ') '; + } + } + } + out += ' ); if (isAdditional' + ($lvl) + ') { '; + } + if ($removeAdditional == 'all') { + out += ' delete ' + ($data) + '[' + ($key) + ']; '; + } else { + var $currentErrorPath = it.errorPath; + var $additionalProperty = '\' + ' + $key + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + } + if ($noAdditional) { + if ($removeAdditional) { + out += ' delete ' + ($data) + '[' + ($key) + ']; '; + } else { + out += ' ' + ($nextValid) + ' = false; '; + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalProperties'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('additionalProperties') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { additionalProperty: \'' + ($additionalProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is an 
invalid additional property'; + } else { + out += 'should NOT have additional properties'; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + $errSchemaPath = $currErrSchemaPath; + if ($breakOnError) { + out += ' break; '; + } + } + } else if ($additionalIsSchema) { + if ($removeAdditional == 'failing') { + out += ' var ' + ($errs) + ' = errors; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty ? it.errorPath : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' if (!' 
+ ($nextValid) + ') { errors = ' + ($errs) + '; if (validate.errors !== null) { if (errors) validate.errors.length = errors; else validate.errors = null; } delete ' + ($data) + '[' + ($key) + ']; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + } else { + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty ? it.errorPath : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + } + } + it.errorPath = $currentErrorPath; + } + if ($someProperties) { + out += ' } '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + var $useDefaults = it.opts.useDefaults && !it.compositeRule; + if ($schemaKeys.length) { + var arr3 = $schemaKeys; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $sch = $schema[$propertyKey]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + var $prop = it.util.getProperty($propertyKey), + $passData = $data + $prop, + $hasDefault = $useDefaults && $sch.default !== undefined; + $it.schema = $sch; + $it.schemaPath = $schemaPath + $prop; + $it.errSchemaPath = $errSchemaPath + '/' + it.util.escapeFragment($propertyKey); + $it.errorPath = it.util.getPath(it.errorPath, $propertyKey, it.opts.jsonPointers); + $it.dataPathArr[$dataNxt] = it.util.toQuotedString($propertyKey); + var $code = it.validate($it); + $it.baseId = 
$currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + $code = it.util.varReplace($code, $nextData, $passData); + var $useData = $passData; + } else { + var $useData = $nextData; + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; '; + } + if ($hasDefault) { + out += ' ' + ($code) + ' '; + } else { + if ($requiredHash && $requiredHash[$propertyKey]) { + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { ' + ($nextValid) + ' = false; '; + var $currentErrorPath = it.errorPath, + $currErrSchemaPath = $errSchemaPath, + $missingProperty = it.util.escapeQuotes($propertyKey); + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + $errSchemaPath = it.errSchemaPath + '/required'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' 
validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + $errSchemaPath = $currErrSchemaPath; + it.errorPath = $currentErrorPath; + out += ' } else { '; + } else { + if ($breakOnError) { + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { ' + ($nextValid) + ' = true; } else { '; + } else { + out += ' if (' + ($useData) + ' !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ' ) { '; + } + } + out += ' ' + ($code) + ' } '; + } + } + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if ($pPropertyKeys.length) { + var arr4 = $pPropertyKeys; + if (arr4) { + var $pProperty, i4 = -1, + l4 = arr4.length - 1; + while (i4 < l4) { + $pProperty = arr4[i4 += 1]; + var $sch = $pProperties[$pProperty]; + if (it.util.schemaHasRules($sch, it.RULES.all)) { + $it.schema = $sch; + $it.schemaPath = it.schemaPath + '.patternProperties' + it.util.getProperty($pProperty); + $it.errSchemaPath = it.errSchemaPath + '/patternProperties/' + it.util.escapeFragment($pProperty); + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + out += ' if (' + (it.usePattern($pProperty)) + '.test(' + ($key) + ')) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + 
']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else ' + ($nextValid) + ' = true; '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + out = it.util.cleanUpCode(out); + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/propertyNames.js b/src/node_modules/ajv/lib/dotjs/propertyNames.js new file mode 100644 index 0000000..c86a8cb --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/propertyNames.js @@ -0,0 +1,82 @@ +'use strict'; +module.exports = function generate_propertyNames(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + out += 'var ' + ($errs) + ' = errors;'; + if (it.util.schemaHasRules($schema, it.RULES.all)) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + var $key = 'key' + $lvl, + $idx = 'idx' + $lvl, + $i = 'i' + $lvl, + $invalidName = '\' + ' + $key + ' + \'', + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $dataProperties = 'dataProperties' + $lvl, + $ownProperties = 
it.opts.ownProperties, + $currentBaseId = it.baseId; + if ($ownProperties) { + out += ' var ' + ($dataProperties) + ' = undefined; '; + } + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + out += ' var startErrs' + ($lvl) + ' = errors; '; + var $passData = $key; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' if (!' 
+ ($nextValid) + ') { for (var ' + ($i) + '=startErrs' + ($lvl) + '; ' + ($i) + '= it.opts.loopRequired, + $ownProperties = it.opts.ownProperties; + if ($breakOnError) { + out += ' var missing' + ($lvl) + '; '; + if ($loopRequired) { + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + '; '; + } + var $i = 'i' + $lvl, + $propertyPath = 'schema' + $lvl + '[' + $i + ']', + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr($currentErrorPath, $propertyPath, it.opts.jsonPointers); + } + out += ' var ' + ($valid) + ' = true; '; + if ($isData) { + out += ' if (schema' + ($lvl) + ' === undefined) ' + ($valid) + ' = true; else if (!Array.isArray(schema' + ($lvl) + ')) ' + ($valid) + ' = false; else {'; + } + out += ' for (var ' + ($i) + ' = 0; ' + ($i) + ' < ' + ($vSchema) + '.length; ' + ($i) + '++) { ' + ($valid) + ' = ' + ($data) + '[' + ($vSchema) + '[' + ($i) + ']] !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + ']) '; + } + out += '; if (!' + ($valid) + ') break; } '; + if ($isData) { + out += ' } '; + } + out += ' if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + } else { + out += ' if ( '; + var arr2 = $required; + if (arr2) { + var $propertyKey, $i = -1, + l2 = arr2.length - 1; + while ($i < l2) { + $propertyKey = arr2[$i += 1]; + if ($i) { + out += ' || '; + } + var $prop = it.util.getProperty($propertyKey), + $useData = $data + $prop; + out += ' ( ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') && (missing' + ($lvl) + ' = ' + (it.util.toQuotedString(it.opts.jsonPointers ? 
$propertyKey : $prop)) + ') ) '; + } + } + out += ') { '; + var $propertyPath = 'missing' + $lvl, + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.opts.jsonPointers ? it.util.getPathExpr($currentErrorPath, $propertyPath, true) : $currentErrorPath + ' + ' + $propertyPath; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + } + } else { + if ($loopRequired) { + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + '; '; + } + var $i = 'i' + $lvl, + $propertyPath = 'schema' + $lvl + '[' + $i + ']', + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr($currentErrorPath, $propertyPath, 
it.opts.jsonPointers); + } + if ($isData) { + out += ' if (' + ($vSchema) + ' && !Array.isArray(' + ($vSchema) + ')) { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } else if (' + ($vSchema) + ' !== undefined) { '; + } + out += ' for (var ' + ($i) + ' = 0; ' + ($i) + ' < ' + ($vSchema) + '.length; ' + ($i) + '++) { if (' + ($data) + '[' + ($vSchema) + '[' + ($i) + ']] === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + ']) '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } } '; + if ($isData) { + out += ' } '; + } + } else { + var arr3 = $required; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $prop = it.util.getProperty($propertyKey), + $missingProperty = it.util.escapeQuotes($propertyKey), + $useData = $data + $prop; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } '; + } + } + } + } + it.errorPath = $currentErrorPath; + } else if ($breakOnError) { + out += ' if (true) {'; + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/uniqueItems.js b/src/node_modules/ajv/lib/dotjs/uniqueItems.js new file mode 100644 index 0000000..c4f6536 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/uniqueItems.js @@ -0,0 +1,86 @@ +'use strict'; +module.exports = function generate_uniqueItems(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + 
$schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (($schema || $isData) && it.opts.uniqueItems !== false) { + if ($isData) { + out += ' var ' + ($valid) + '; if (' + ($schemaValue) + ' === false || ' + ($schemaValue) + ' === undefined) ' + ($valid) + ' = true; else if (typeof ' + ($schemaValue) + ' != \'boolean\') ' + ($valid) + ' = false; else { '; + } + out += ' var i = ' + ($data) + '.length , ' + ($valid) + ' = true , j; if (i > 1) { '; + var $itemType = it.schema.items && it.schema.items.type, + $typeIsArray = Array.isArray($itemType); + if (!$itemType || $itemType == 'object' || $itemType == 'array' || ($typeIsArray && ($itemType.indexOf('object') >= 0 || $itemType.indexOf('array') >= 0))) { + out += ' outer: for (;i--;) { for (j = i; j--;) { if (equal(' + ($data) + '[i], ' + ($data) + '[j])) { ' + ($valid) + ' = false; break outer; } } } '; + } else { + out += ' var itemIndices = {}, item; for (;i--;) { var item = ' + ($data) + '[i]; '; + var $method = 'checkDataType' + ($typeIsArray ? 's' : ''); + out += ' if (' + (it.util[$method]($itemType, 'item', true)) + ') continue; '; + if ($typeIsArray) { + out += ' if (typeof item == \'string\') item = \'"\' + item; '; + } + out += ' if (typeof itemIndices[item] == \'number\') { ' + ($valid) + ' = false; j = itemIndices[item]; break; } itemIndices[item] = i; } '; + } + out += ' } '; + if ($isData) { + out += ' } '; + } + out += ' if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('uniqueItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { i: i, j: j } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have duplicate items (items ## \' + j + \' and \' + i + \' are identical)\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} diff --git a/src/node_modules/ajv/lib/dotjs/validate.js b/src/node_modules/ajv/lib/dotjs/validate.js new file mode 100644 index 0000000..cd0efc8 --- /dev/null +++ b/src/node_modules/ajv/lib/dotjs/validate.js @@ -0,0 +1,494 @@ +'use strict'; +module.exports = function generate_validate(it, $keyword, $ruleType) { + var out = ''; + var $async = it.schema.$async === true, + $refKeywords = it.util.schemaHasRulesExcept(it.schema, it.RULES.all, '$ref'), + $id = it.self._getId(it.schema); + if (it.opts.strictKeywords) { + var $unknownKwd = it.util.schemaUnknownRules(it.schema, it.RULES.keywords); + if ($unknownKwd) { + var $keywordsMsg = 
'unknown keyword: ' + $unknownKwd; + if (it.opts.strictKeywords === 'log') it.logger.warn($keywordsMsg); + else throw new Error($keywordsMsg); + } + } + if (it.isTop) { + out += ' var validate = '; + if ($async) { + it.async = true; + out += 'async '; + } + out += 'function(data, dataPath, parentData, parentDataProperty, rootData) { \'use strict\'; '; + if ($id && (it.opts.sourceCode || it.opts.processCode)) { + out += ' ' + ('/\*# sourceURL=' + $id + ' */') + ' '; + } + } + if (typeof it.schema == 'boolean' || !($refKeywords || it.schema.$ref)) { + var $keyword = 'false schema'; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + if (it.schema === false) { + if (it.isTop) { + $breakOnError = true; + } else { + out += ' var ' + ($valid) + ' = false; '; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'false schema') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'boolean schema is false\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + 
(__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + } else { + if (it.isTop) { + if ($async) { + out += ' return data; '; + } else { + out += ' validate.errors = null; return true; '; + } + } else { + out += ' var ' + ($valid) + ' = true; '; + } + } + if (it.isTop) { + out += ' }; return validate; '; + } + return out; + } + if (it.isTop) { + var $top = it.isTop, + $lvl = it.level = 0, + $dataLvl = it.dataLevel = 0, + $data = 'data'; + it.rootId = it.resolve.fullPath(it.self._getId(it.root.schema)); + it.baseId = it.baseId || it.rootId; + delete it.isTop; + it.dataPathArr = [undefined]; + if (it.schema.default !== undefined && it.opts.useDefaults && it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored in the schema root'; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + out += ' var vErrors = null; '; + out += ' var errors = 0; '; + out += ' if (rootData === undefined) rootData = data; '; + } else { + var $lvl = it.level, + $dataLvl = it.dataLevel, + $data = 'data' + ($dataLvl || ''); + if ($id) it.baseId = it.resolve.url(it.baseId, $id); + if ($async && !it.async) throw new Error('async schema in sync schema'); + out += ' var errs_' + ($lvl) + ' = errors;'; + } + var $valid = 'valid' + $lvl, + $breakOnError = !it.opts.allErrors, + $closingBraces1 = '', + $closingBraces2 = ''; + var $errorKeyword; + var $typeSchema = it.schema.type, + $typeIsArray = Array.isArray($typeSchema); + if ($typeSchema && it.opts.nullable && it.schema.nullable === true) { + if ($typeIsArray) { + if ($typeSchema.indexOf('null') == -1) $typeSchema = $typeSchema.concat('null'); + } else if ($typeSchema != 'null') { + $typeSchema = [$typeSchema, 'null']; + $typeIsArray = true; + } + } + if ($typeIsArray && $typeSchema.length == 1) { + $typeSchema = $typeSchema[0]; + $typeIsArray = false; + } + if (it.schema.$ref && $refKeywords) { + if (it.opts.extendRefs == 'fail') { + throw 
new Error('$ref: validation keywords used in schema at path "' + it.errSchemaPath + '" (see option extendRefs)'); + } else if (it.opts.extendRefs !== true) { + $refKeywords = false; + it.logger.warn('$ref: keywords ignored in schema at path "' + it.errSchemaPath + '"'); + } + } + if (it.schema.$comment && it.opts.$comment) { + out += ' ' + (it.RULES.all.$comment.code(it, '$comment')); + } + if ($typeSchema) { + if (it.opts.coerceTypes) { + var $coerceToTypes = it.util.coerceToTypes(it.opts.coerceTypes, $typeSchema); + } + var $rulesGroup = it.RULES.types[$typeSchema]; + if ($coerceToTypes || $typeIsArray || $rulesGroup === true || ($rulesGroup && !$shouldUseGroup($rulesGroup))) { + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type'; + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type', + $method = $typeIsArray ? 'checkDataTypes' : 'checkDataType'; + out += ' if (' + (it.util[$method]($typeSchema, $data, true)) + ') { '; + if ($coerceToTypes) { + var $dataType = 'dataType' + $lvl, + $coerced = 'coerced' + $lvl; + out += ' var ' + ($dataType) + ' = typeof ' + ($data) + '; '; + if (it.opts.coerceTypes == 'array') { + out += ' if (' + ($dataType) + ' == \'object\' && Array.isArray(' + ($data) + ')) ' + ($dataType) + ' = \'array\'; '; + } + out += ' var ' + ($coerced) + ' = undefined; '; + var $bracesCoercion = ''; + var arr1 = $coerceToTypes; + if (arr1) { + var $type, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $type = arr1[$i += 1]; + if ($i) { + out += ' if (' + ($coerced) + ' === undefined) { '; + $bracesCoercion += '}'; + } + if (it.opts.coerceTypes == 'array' && $type != 'array') { + out += ' if (' + ($dataType) + ' == \'array\' && ' + ($data) + '.length == 1) { ' + ($coerced) + ' = ' + ($data) + ' = ' + ($data) + '[0]; ' + ($dataType) + ' = typeof ' + ($data) + '; } '; + } + if ($type == 'string') { + out += ' if (' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == 
\'boolean\') ' + ($coerced) + ' = \'\' + ' + ($data) + '; else if (' + ($data) + ' === null) ' + ($coerced) + ' = \'\'; '; + } else if ($type == 'number' || $type == 'integer') { + out += ' if (' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' === null || (' + ($dataType) + ' == \'string\' && ' + ($data) + ' && ' + ($data) + ' == +' + ($data) + ' '; + if ($type == 'integer') { + out += ' && !(' + ($data) + ' % 1)'; + } + out += ')) ' + ($coerced) + ' = +' + ($data) + '; '; + } else if ($type == 'boolean') { + out += ' if (' + ($data) + ' === \'false\' || ' + ($data) + ' === 0 || ' + ($data) + ' === null) ' + ($coerced) + ' = false; else if (' + ($data) + ' === \'true\' || ' + ($data) + ' === 1) ' + ($coerced) + ' = true; '; + } else if ($type == 'null') { + out += ' if (' + ($data) + ' === \'\' || ' + ($data) + ' === 0 || ' + ($data) + ' === false) ' + ($coerced) + ' = null; '; + } else if (it.opts.coerceTypes == 'array' && $type == 'array') { + out += ' if (' + ($dataType) + ' == \'string\' || ' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' == null) ' + ($coerced) + ' = [' + ($data) + ']; '; + } + } + } + out += ' ' + ($bracesCoercion) + ' if (' + ($coerced) + ' === undefined) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , 
parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' ' + ($data) + ' = ' + ($coerced) + '; '; + if (!$dataLvl) { + out += 'if (' + ($parentData) + ' !== undefined)'; + } + out += ' ' + ($parentData) + '[' + ($parentDataProperty) + '] = ' + ($coerced) + '; } '; + } else { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' 
+ (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + } + out += ' } '; + } + } + if (it.schema.$ref && !$refKeywords) { + out += ' ' + (it.RULES.all.$ref.code(it, '$ref')) + ' '; + if ($breakOnError) { + out += ' } if (errors === '; + if ($top) { + out += '0'; + } else { + out += 'errs_' + ($lvl); + } + out += ') { '; + $closingBraces2 += '}'; + } + } else { + var arr2 = it.RULES; + if (arr2) { + var $rulesGroup, i2 = -1, + l2 = arr2.length - 1; + while (i2 < l2) { + $rulesGroup = arr2[i2 += 1]; + if ($shouldUseGroup($rulesGroup)) { + if ($rulesGroup.type) { + out += ' if (' + (it.util.checkDataType($rulesGroup.type, $data)) + ') { '; + } + if (it.opts.useDefaults) { + if ($rulesGroup.type == 'object' && it.schema.properties) { + var $schema = it.schema.properties, + $schemaKeys = Object.keys($schema); + var arr3 = $schemaKeys; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $sch = $schema[$propertyKey]; + if ($sch.default !== undefined) { + var $passData = $data + it.util.getProperty($propertyKey); + if (it.compositeRule) { + if (it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored for: ' + $passData; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + } else { + out += ' if (' + ($passData) + ' === undefined '; + if (it.opts.useDefaults == 'empty') { + out += ' || ' + ($passData) + ' === null || ' + ($passData) + ' === \'\' '; + } + out += ' ) ' + ($passData) + ' = '; + if (it.opts.useDefaults == 'shared') { + out += ' ' + (it.useDefault($sch.default)) + ' '; + } else { + out += ' ' + (JSON.stringify($sch.default)) + ' '; + } + out += '; '; + } + } + } + } + } else if ($rulesGroup.type == 'array' && Array.isArray(it.schema.items)) { + var 
arr4 = it.schema.items; + if (arr4) { + var $sch, $i = -1, + l4 = arr4.length - 1; + while ($i < l4) { + $sch = arr4[$i += 1]; + if ($sch.default !== undefined) { + var $passData = $data + '[' + $i + ']'; + if (it.compositeRule) { + if (it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored for: ' + $passData; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + } else { + out += ' if (' + ($passData) + ' === undefined '; + if (it.opts.useDefaults == 'empty') { + out += ' || ' + ($passData) + ' === null || ' + ($passData) + ' === \'\' '; + } + out += ' ) ' + ($passData) + ' = '; + if (it.opts.useDefaults == 'shared') { + out += ' ' + (it.useDefault($sch.default)) + ' '; + } else { + out += ' ' + (JSON.stringify($sch.default)) + ' '; + } + out += '; '; + } + } + } + } + } + } + var arr5 = $rulesGroup.rules; + if (arr5) { + var $rule, i5 = -1, + l5 = arr5.length - 1; + while (i5 < l5) { + $rule = arr5[i5 += 1]; + if ($shouldUseRule($rule)) { + var $code = $rule.code(it, $rule.keyword, $rulesGroup.type); + if ($code) { + out += ' ' + ($code) + ' '; + if ($breakOnError) { + $closingBraces1 += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces1) + ' '; + $closingBraces1 = ''; + } + if ($rulesGroup.type) { + out += ' } '; + if ($typeSchema && $typeSchema === $rulesGroup.type && !$coerceToTypes) { + out += ' else { '; + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if 
(it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + } + } + if ($breakOnError) { + out += ' if (errors === '; + if ($top) { + out += '0'; + } else { + out += 'errs_' + ($lvl); + } + out += ') { '; + $closingBraces2 += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces2) + ' '; + } + if ($top) { + if ($async) { + out += ' if (errors === 0) return data; '; + out += ' else throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; '; + out += ' return errors === 0; '; + } + out += ' }; return validate;'; + } else { + out += ' var ' + ($valid) + ' = errors === errs_' + ($lvl) + ';'; + } + out = it.util.cleanUpCode(out); + if ($top) { + out = it.util.finalCleanUpCode(out, $async); + } + + function $shouldUseGroup($rulesGroup) { + var rules = $rulesGroup.rules; + for (var i = 0; i < rules.length; i++) + if ($shouldUseRule(rules[i])) return true; + } + + function $shouldUseRule($rule) { + return it.schema[$rule.keyword] !== undefined || ($rule.implements && $ruleImplementsSomeKeyword($rule)); + } + + function $ruleImplementsSomeKeyword($rule) { + var impl = $rule.implements; + for (var i = 0; i < impl.length; i++) + if (it.schema[impl[i]] !== 
undefined) return true; + } + return out; +} diff --git a/src/node_modules/ajv/lib/keyword.js b/src/node_modules/ajv/lib/keyword.js new file mode 100644 index 0000000..cf4d699 --- /dev/null +++ b/src/node_modules/ajv/lib/keyword.js @@ -0,0 +1,178 @@ +'use strict'; + +var IDENTIFIER = /^[a-z_$][a-z0-9_$-]*$/i; +var customRuleCode = require('./dotjs/custom'); +var metaSchema = require('./refs/json-schema-draft-07.json'); + +module.exports = { + add: addKeyword, + get: getKeyword, + remove: removeKeyword, + validate: validateKeyword +}; + +var definitionSchema = { + definitions: { + simpleTypes: metaSchema.definitions.simpleTypes + }, + type: 'object', + dependencies: { + schema: ['validate'], + $data: ['validate'], + statements: ['inline'], + valid: {not: {required: ['macro']}} + }, + properties: { + type: metaSchema.properties.type, + schema: {type: 'boolean'}, + statements: {type: 'boolean'}, + dependencies: { + type: 'array', + items: {type: 'string'} + }, + metaSchema: {type: 'object'}, + modifying: {type: 'boolean'}, + valid: {type: 'boolean'}, + $data: {type: 'boolean'}, + async: {type: 'boolean'}, + errors: { + anyOf: [ + {type: 'boolean'}, + {const: 'full'} + ] + } + } +}; + +/** + * Define custom keyword + * @this Ajv + * @param {String} keyword custom keyword, should be unique (including different from all standard, custom and macro keywords). + * @param {Object} definition keyword definition object with properties `type` (type(s) which the keyword applies to), `validate` or `compile`. 
+ * @return {Ajv} this for method chaining + */ +function addKeyword(keyword, definition) { + /* jshint validthis: true */ + /* eslint no-shadow: 0 */ + var RULES = this.RULES; + if (RULES.keywords[keyword]) + throw new Error('Keyword ' + keyword + ' is already defined'); + + if (!IDENTIFIER.test(keyword)) + throw new Error('Keyword ' + keyword + ' is not a valid identifier'); + + if (definition) { + this.validateKeyword(definition, true); + + var dataType = definition.type; + if (Array.isArray(dataType)) { + for (var i=0; i ../ajv-dist/bower.json + cd ../ajv-dist + + if [[ `git status --porcelain` ]]; then + echo "Changes detected. Updating master branch..." + git add -A + git commit -m "updated by travis build #$TRAVIS_BUILD_NUMBER" + git push --quiet origin master > /dev/null 2>&1 + fi + + echo "Publishing tag..." + + git tag $TRAVIS_TAG + git push --tags > /dev/null 2>&1 + + echo "Done" +fi diff --git a/src/node_modules/ajv/scripts/travis-gh-pages b/src/node_modules/ajv/scripts/travis-gh-pages new file mode 100755 index 0000000..46ded16 --- /dev/null +++ b/src/node_modules/ajv/scripts/travis-gh-pages @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -e + +if [[ "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" == "false" && $TRAVIS_JOB_NUMBER =~ ".3" ]]; then + git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qE '\.md$|^LICENSE$|travis-gh-pages$' && { + rm -rf ../gh-pages + git clone -b gh-pages --single-branch https://${GITHUB_TOKEN}@github.com/epoberezkin/ajv.git ../gh-pages + mkdir -p ../gh-pages/_source + cp *.md ../gh-pages/_source + cp LICENSE ../gh-pages/_source + currentDir=$(pwd) + cd ../gh-pages + $currentDir/node_modules/.bin/gh-pages-generator + # remove logo from README + sed -i -E "s/]+ajv_logo[^>]+>//" index.md + git config user.email "$GIT_USER_EMAIL" + git config user.name "$GIT_USER_NAME" + git add . 
+ git commit -am "updated by travis build #$TRAVIS_BUILD_NUMBER" + git push --quiet origin gh-pages > /dev/null 2>&1 + } +fi diff --git a/src/node_modules/asn1/LICENSE b/src/node_modules/asn1/LICENSE new file mode 100644 index 0000000..9b5dcdb --- /dev/null +++ b/src/node_modules/asn1/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011 Mark Cavage, All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE diff --git a/src/node_modules/asn1/README.md b/src/node_modules/asn1/README.md new file mode 100644 index 0000000..2208210 --- /dev/null +++ b/src/node_modules/asn1/README.md @@ -0,0 +1,50 @@ +node-asn1 is a library for encoding and decoding ASN.1 datatypes in pure JS. +Currently BER encoding is supported; at some point I'll likely have to do DER. + +## Usage + +Mostly, if you're *actually* needing to read and write ASN.1, you probably don't +need this readme to explain what and why. 
If you have no idea what ASN.1 is, +see this: ftp://ftp.rsa.com/pub/pkcs/ascii/layman.asc + +The source is pretty much self-explanatory, and has read/write methods for the +common types out there. + +### Decoding + +The following reads an ASN.1 sequence with a boolean. + + var Ber = require('asn1').Ber; + + var reader = new Ber.Reader(Buffer.from([0x30, 0x03, 0x01, 0x01, 0xff])); + + reader.readSequence(); + console.log('Sequence len: ' + reader.length); + if (reader.peek() === Ber.Boolean) + console.log(reader.readBoolean()); + +### Encoding + +The following generates the same payload as above. + + var Ber = require('asn1').Ber; + + var writer = new Ber.Writer(); + + writer.startSequence(); + writer.writeBoolean(true); + writer.endSequence(); + + console.log(writer.buffer); + +## Installation + + npm install asn1 + +## License + +MIT. + +## Bugs + +See . diff --git a/src/node_modules/asn1/lib/ber/errors.js b/src/node_modules/asn1/lib/ber/errors.js new file mode 100644 index 0000000..4557b8a --- /dev/null +++ b/src/node_modules/asn1/lib/ber/errors.js @@ -0,0 +1,13 @@ +// Copyright 2011 Mark Cavage All rights reserved. + + +module.exports = { + + newInvalidAsn1Error: function (msg) { + var e = new Error(); + e.name = 'InvalidAsn1Error'; + e.message = msg || ''; + return e; + } + +}; diff --git a/src/node_modules/asn1/lib/ber/index.js b/src/node_modules/asn1/lib/ber/index.js new file mode 100644 index 0000000..387d132 --- /dev/null +++ b/src/node_modules/asn1/lib/ber/index.js @@ -0,0 +1,27 @@ +// Copyright 2011 Mark Cavage All rights reserved. 
+ +var errors = require('./errors'); +var types = require('./types'); + +var Reader = require('./reader'); +var Writer = require('./writer'); + + +// --- Exports + +module.exports = { + + Reader: Reader, + + Writer: Writer + +}; + +for (var t in types) { + if (types.hasOwnProperty(t)) + module.exports[t] = types[t]; +} +for (var e in errors) { + if (errors.hasOwnProperty(e)) + module.exports[e] = errors[e]; +} diff --git a/src/node_modules/asn1/lib/ber/reader.js b/src/node_modules/asn1/lib/ber/reader.js new file mode 100644 index 0000000..8a7e4ca --- /dev/null +++ b/src/node_modules/asn1/lib/ber/reader.js @@ -0,0 +1,262 @@ +// Copyright 2011 Mark Cavage All rights reserved. + +var assert = require('assert'); +var Buffer = require('safer-buffer').Buffer; + +var ASN1 = require('./types'); +var errors = require('./errors'); + + +// --- Globals + +var newInvalidAsn1Error = errors.newInvalidAsn1Error; + + + +// --- API + +function Reader(data) { + if (!data || !Buffer.isBuffer(data)) + throw new TypeError('data must be a node Buffer'); + + this._buf = data; + this._size = data.length; + + // These hold the "current" state + this._len = 0; + this._offset = 0; +} + +Object.defineProperty(Reader.prototype, 'length', { + enumerable: true, + get: function () { return (this._len); } +}); + +Object.defineProperty(Reader.prototype, 'offset', { + enumerable: true, + get: function () { return (this._offset); } +}); + +Object.defineProperty(Reader.prototype, 'remain', { + get: function () { return (this._size - this._offset); } +}); + +Object.defineProperty(Reader.prototype, 'buffer', { + get: function () { return (this._buf.slice(this._offset)); } +}); + + +/** + * Reads a single byte and advances offset; you can pass in `true` to make this + * a "peek" operation (i.e., get the byte, but don't advance the offset). + * + * @param {Boolean} peek true means don't move offset. + * @return {Number} the next byte, null if not enough data. 
+ */ +Reader.prototype.readByte = function (peek) { + if (this._size - this._offset < 1) + return null; + + var b = this._buf[this._offset] & 0xff; + + if (!peek) + this._offset += 1; + + return b; +}; + + +Reader.prototype.peek = function () { + return this.readByte(true); +}; + + +/** + * Reads a (potentially) variable length off the BER buffer. This call is + * not really meant to be called directly, as callers have to manipulate + * the internal buffer afterwards. + * + * As a result of this call, you can call `Reader.length`, until the + * next thing called that does a readLength. + * + * @return {Number} the amount of offset to advance the buffer. + * @throws {InvalidAsn1Error} on bad ASN.1 + */ +Reader.prototype.readLength = function (offset) { + if (offset === undefined) + offset = this._offset; + + if (offset >= this._size) + return null; + + var lenB = this._buf[offset++] & 0xff; + if (lenB === null) + return null; + + if ((lenB & 0x80) === 0x80) { + lenB &= 0x7f; + + if (lenB === 0) + throw newInvalidAsn1Error('Indefinite length not supported'); + + if (lenB > 4) + throw newInvalidAsn1Error('encoding too long'); + + if (this._size - offset < lenB) + return null; + + this._len = 0; + for (var i = 0; i < lenB; i++) + this._len = (this._len << 8) + (this._buf[offset++] & 0xff); + + } else { + // Wasn't a variable length + this._len = lenB; + } + + return offset; +}; + + +/** + * Parses the next sequence in this BER buffer. + * + * To get the length of the sequence, call `Reader.length`. + * + * @return {Number} the sequence's tag. 
+ */ +Reader.prototype.readSequence = function (tag) { + var seq = this.peek(); + if (seq === null) + return null; + if (tag !== undefined && tag !== seq) + throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + seq.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + if (o === null) + return null; + + this._offset = o; + return seq; +}; + + +Reader.prototype.readInt = function () { + return this._readTag(ASN1.Integer); +}; + + +Reader.prototype.readBoolean = function () { + return (this._readTag(ASN1.Boolean) === 0 ? false : true); +}; + + +Reader.prototype.readEnumeration = function () { + return this._readTag(ASN1.Enumeration); +}; + + +Reader.prototype.readString = function (tag, retbuf) { + if (!tag) + tag = ASN1.OctetString; + + var b = this.peek(); + if (b === null) + return null; + + if (b !== tag) + throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + b.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + + if (o === null) + return null; + + if (this.length > this._size - o) + return null; + + this._offset = o; + + if (this.length === 0) + return retbuf ? Buffer.alloc(0) : ''; + + var str = this._buf.slice(this._offset, this._offset + this.length); + this._offset += this.length; + + return retbuf ? 
str : str.toString('utf8'); +}; + +Reader.prototype.readOID = function (tag) { + if (!tag) + tag = ASN1.OID; + + var b = this.readString(tag, true); + if (b === null) + return null; + + var values = []; + var value = 0; + + for (var i = 0; i < b.length; i++) { + var byte = b[i] & 0xff; + + value <<= 7; + value += byte & 0x7f; + if ((byte & 0x80) === 0) { + values.push(value); + value = 0; + } + } + + value = values.shift(); + values.unshift(value % 40); + values.unshift((value / 40) >> 0); + + return values.join('.'); +}; + + +Reader.prototype._readTag = function (tag) { + assert.ok(tag !== undefined); + + var b = this.peek(); + + if (b === null) + return null; + + if (b !== tag) + throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + b.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + if (o === null) + return null; + + if (this.length > 4) + throw newInvalidAsn1Error('Integer too long: ' + this.length); + + if (this.length > this._size - o) + return null; + this._offset = o; + + var fb = this._buf[this._offset]; + var value = 0; + + for (var i = 0; i < this.length; i++) { + value <<= 8; + value |= (this._buf[this._offset++] & 0xff); + } + + if ((fb & 0x80) === 0x80 && i !== 4) + value -= (1 << (i * 8)); + + return value >> 0; +}; + + + +// --- Exported API + +module.exports = Reader; diff --git a/src/node_modules/asn1/lib/ber/types.js b/src/node_modules/asn1/lib/ber/types.js new file mode 100644 index 0000000..8aea000 --- /dev/null +++ b/src/node_modules/asn1/lib/ber/types.js @@ -0,0 +1,36 @@ +// Copyright 2011 Mark Cavage All rights reserved. 
+ + +module.exports = { + EOC: 0, + Boolean: 1, + Integer: 2, + BitString: 3, + OctetString: 4, + Null: 5, + OID: 6, + ObjectDescriptor: 7, + External: 8, + Real: 9, // float + Enumeration: 10, + PDV: 11, + Utf8String: 12, + RelativeOID: 13, + Sequence: 16, + Set: 17, + NumericString: 18, + PrintableString: 19, + T61String: 20, + VideotexString: 21, + IA5String: 22, + UTCTime: 23, + GeneralizedTime: 24, + GraphicString: 25, + VisibleString: 26, + GeneralString: 28, + UniversalString: 29, + CharacterString: 30, + BMPString: 31, + Constructor: 32, + Context: 128 +}; diff --git a/src/node_modules/asn1/lib/ber/writer.js b/src/node_modules/asn1/lib/ber/writer.js new file mode 100644 index 0000000..3515acf --- /dev/null +++ b/src/node_modules/asn1/lib/ber/writer.js @@ -0,0 +1,317 @@ +// Copyright 2011 Mark Cavage All rights reserved. + +var assert = require('assert'); +var Buffer = require('safer-buffer').Buffer; +var ASN1 = require('./types'); +var errors = require('./errors'); + + +// --- Globals + +var newInvalidAsn1Error = errors.newInvalidAsn1Error; + +var DEFAULT_OPTS = { + size: 1024, + growthFactor: 8 +}; + + +// --- Helpers + +function merge(from, to) { + assert.ok(from); + assert.equal(typeof (from), 'object'); + assert.ok(to); + assert.equal(typeof (to), 'object'); + + var keys = Object.getOwnPropertyNames(from); + keys.forEach(function (key) { + if (to[key]) + return; + + var value = Object.getOwnPropertyDescriptor(from, key); + Object.defineProperty(to, key, value); + }); + + return to; +} + + + +// --- API + +function Writer(options) { + options = merge(DEFAULT_OPTS, options || {}); + + this._buf = Buffer.alloc(options.size || 1024); + this._size = this._buf.length; + this._offset = 0; + this._options = options; + + // A list of offsets in the buffer where we need to insert + // sequence tag/len pairs. 
+ this._seq = []; +} + +Object.defineProperty(Writer.prototype, 'buffer', { + get: function () { + if (this._seq.length) + throw newInvalidAsn1Error(this._seq.length + ' unended sequence(s)'); + + return (this._buf.slice(0, this._offset)); + } +}); + +Writer.prototype.writeByte = function (b) { + if (typeof (b) !== 'number') + throw new TypeError('argument must be a Number'); + + this._ensure(1); + this._buf[this._offset++] = b; +}; + + +Writer.prototype.writeInt = function (i, tag) { + if (typeof (i) !== 'number') + throw new TypeError('argument must be a Number'); + if (typeof (tag) !== 'number') + tag = ASN1.Integer; + + var sz = 4; + + while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) && + (sz > 1)) { + sz--; + i <<= 8; + } + + if (sz > 4) + throw newInvalidAsn1Error('BER ints cannot be > 0xffffffff'); + + this._ensure(2 + sz); + this._buf[this._offset++] = tag; + this._buf[this._offset++] = sz; + + while (sz-- > 0) { + this._buf[this._offset++] = ((i & 0xff000000) >>> 24); + i <<= 8; + } + +}; + + +Writer.prototype.writeNull = function () { + this.writeByte(ASN1.Null); + this.writeByte(0x00); +}; + + +Writer.prototype.writeEnumeration = function (i, tag) { + if (typeof (i) !== 'number') + throw new TypeError('argument must be a Number'); + if (typeof (tag) !== 'number') + tag = ASN1.Enumeration; + + return this.writeInt(i, tag); +}; + + +Writer.prototype.writeBoolean = function (b, tag) { + if (typeof (b) !== 'boolean') + throw new TypeError('argument must be a Boolean'); + if (typeof (tag) !== 'number') + tag = ASN1.Boolean; + + this._ensure(3); + this._buf[this._offset++] = tag; + this._buf[this._offset++] = 0x01; + this._buf[this._offset++] = b ? 
0xff : 0x00; +}; + + +Writer.prototype.writeString = function (s, tag) { + if (typeof (s) !== 'string') + throw new TypeError('argument must be a string (was: ' + typeof (s) + ')'); + if (typeof (tag) !== 'number') + tag = ASN1.OctetString; + + var len = Buffer.byteLength(s); + this.writeByte(tag); + this.writeLength(len); + if (len) { + this._ensure(len); + this._buf.write(s, this._offset); + this._offset += len; + } +}; + + +Writer.prototype.writeBuffer = function (buf, tag) { + if (typeof (tag) !== 'number') + throw new TypeError('tag must be a number'); + if (!Buffer.isBuffer(buf)) + throw new TypeError('argument must be a buffer'); + + this.writeByte(tag); + this.writeLength(buf.length); + this._ensure(buf.length); + buf.copy(this._buf, this._offset, 0, buf.length); + this._offset += buf.length; +}; + + +Writer.prototype.writeStringArray = function (strings) { + if ((!strings instanceof Array)) + throw new TypeError('argument must be an Array[String]'); + + var self = this; + strings.forEach(function (s) { + self.writeString(s); + }); +}; + +// This is really to solve DER cases, but whatever for now +Writer.prototype.writeOID = function (s, tag) { + if (typeof (s) !== 'string') + throw new TypeError('argument must be a string'); + if (typeof (tag) !== 'number') + tag = ASN1.OID; + + if (!/^([0-9]+\.){3,}[0-9]+$/.test(s)) + throw new Error('argument is not a valid OID string'); + + function encodeOctet(bytes, octet) { + if (octet < 128) { + bytes.push(octet); + } else if (octet < 16384) { + bytes.push((octet >>> 7) | 0x80); + bytes.push(octet & 0x7F); + } else if (octet < 2097152) { + bytes.push((octet >>> 14) | 0x80); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } else if (octet < 268435456) { + bytes.push((octet >>> 21) | 0x80); + bytes.push(((octet >>> 14) | 0x80) & 0xFF); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } else { + bytes.push(((octet >>> 28) | 0x80) & 0xFF); + bytes.push(((octet >>> 21) 
| 0x80) & 0xFF); + bytes.push(((octet >>> 14) | 0x80) & 0xFF); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } + } + + var tmp = s.split('.'); + var bytes = []; + bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10)); + tmp.slice(2).forEach(function (b) { + encodeOctet(bytes, parseInt(b, 10)); + }); + + var self = this; + this._ensure(2 + bytes.length); + this.writeByte(tag); + this.writeLength(bytes.length); + bytes.forEach(function (b) { + self.writeByte(b); + }); +}; + + +Writer.prototype.writeLength = function (len) { + if (typeof (len) !== 'number') + throw new TypeError('argument must be a Number'); + + this._ensure(4); + + if (len <= 0x7f) { + this._buf[this._offset++] = len; + } else if (len <= 0xff) { + this._buf[this._offset++] = 0x81; + this._buf[this._offset++] = len; + } else if (len <= 0xffff) { + this._buf[this._offset++] = 0x82; + this._buf[this._offset++] = len >> 8; + this._buf[this._offset++] = len; + } else if (len <= 0xffffff) { + this._buf[this._offset++] = 0x83; + this._buf[this._offset++] = len >> 16; + this._buf[this._offset++] = len >> 8; + this._buf[this._offset++] = len; + } else { + throw newInvalidAsn1Error('Length too long (> 4 bytes)'); + } +}; + +Writer.prototype.startSequence = function (tag) { + if (typeof (tag) !== 'number') + tag = ASN1.Sequence | ASN1.Constructor; + + this.writeByte(tag); + this._seq.push(this._offset); + this._ensure(3); + this._offset += 3; +}; + + +Writer.prototype.endSequence = function () { + var seq = this._seq.pop(); + var start = seq + 3; + var len = this._offset - start; + + if (len <= 0x7f) { + this._shift(start, len, -2); + this._buf[seq] = len; + } else if (len <= 0xff) { + this._shift(start, len, -1); + this._buf[seq] = 0x81; + this._buf[seq + 1] = len; + } else if (len <= 0xffff) { + this._buf[seq] = 0x82; + this._buf[seq + 1] = len >> 8; + this._buf[seq + 2] = len; + } else if (len <= 0xffffff) { + this._shift(start, len, 1); + this._buf[seq] = 0x83; + 
this._buf[seq + 1] = len >> 16; + this._buf[seq + 2] = len >> 8; + this._buf[seq + 3] = len; + } else { + throw newInvalidAsn1Error('Sequence too long'); + } +}; + + +Writer.prototype._shift = function (start, len, shift) { + assert.ok(start !== undefined); + assert.ok(len !== undefined); + assert.ok(shift); + + this._buf.copy(this._buf, start + shift, start, start + len); + this._offset += shift; +}; + +Writer.prototype._ensure = function (len) { + assert.ok(len); + + if (this._size - this._offset < len) { + var sz = this._size * this._options.growthFactor; + if (sz - this._offset < len) + sz += len; + + var buf = Buffer.alloc(sz); + + this._buf.copy(buf, 0, 0, this._offset); + this._buf = buf; + this._size = sz; + } +}; + + + +// --- Exported API + +module.exports = Writer; diff --git a/src/node_modules/asn1/lib/index.js b/src/node_modules/asn1/lib/index.js new file mode 100644 index 0000000..ede3ab2 --- /dev/null +++ b/src/node_modules/asn1/lib/index.js @@ -0,0 +1,20 @@ +// Copyright 2011 Mark Cavage All rights reserved. 
+ +// If you have no idea what ASN.1 or BER is, see this: +// ftp://ftp.rsa.com/pub/pkcs/ascii/layman.asc + +var Ber = require('./ber/index'); + + + +// --- Exported API + +module.exports = { + + Ber: Ber, + + BerReader: Ber.Reader, + + BerWriter: Ber.Writer + +}; diff --git a/src/node_modules/asn1/package.json b/src/node_modules/asn1/package.json new file mode 100644 index 0000000..487c40e --- /dev/null +++ b/src/node_modules/asn1/package.json @@ -0,0 +1,78 @@ +{ + "_args": [ + [ + "asn1@0.2.4", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "asn1@0.2.4", + "_id": "asn1@0.2.4", + "_inBundle": false, + "_integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "_location": "/asn1", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "asn1@0.2.4", + "name": "asn1", + "escapedName": "asn1", + "rawSpec": "0.2.4", + "saveSpec": null, + "fetchSpec": "0.2.4" + }, + "_requiredBy": [ + "/sshpk" + ], + "_resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "_spec": "0.2.4", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Joyent", + "url": "joyent.com" + }, + "bugs": { + "url": "https://github.com/joyent/node-asn1/issues" + }, + "contributors": [ + { + "name": "Mark Cavage", + "email": "mcavage@gmail.com" + }, + { + "name": "David Gwynne", + "email": "loki@animata.net" + }, + { + "name": "Yunong Xiao", + "email": "yunong@joyent.com" + }, + { + "name": "Alex Wilson", + "email": "alex.wilson@joyent.com" + } + ], + "dependencies": { + "safer-buffer": "~2.1.0" + }, + "description": "Contains parsers and serializers for ASN.1 (currently BER only)", + "devDependencies": { + "eslint": "2.13.1", + "eslint-plugin-joyent": "~1.3.0", + "faucet": "0.0.1", + "istanbul": "^0.3.6", + "tape": "^3.5.0" + }, + "homepage": 
"https://github.com/joyent/node-asn1#readme", + "license": "MIT", + "main": "lib/index.js", + "name": "asn1", + "repository": { + "type": "git", + "url": "git://github.com/joyent/node-asn1.git" + }, + "scripts": { + "test": "tape ./test/ber/*.test.js" + }, + "version": "0.2.4" +} diff --git a/src/node_modules/assert-plus/AUTHORS b/src/node_modules/assert-plus/AUTHORS new file mode 100644 index 0000000..1923524 --- /dev/null +++ b/src/node_modules/assert-plus/AUTHORS @@ -0,0 +1,6 @@ +Dave Eddy +Fred Kuo +Lars-Magnus Skog +Mark Cavage +Patrick Mooney +Rob Gulewich diff --git a/src/node_modules/assert-plus/CHANGES.md b/src/node_modules/assert-plus/CHANGES.md new file mode 100644 index 0000000..57d92bf --- /dev/null +++ b/src/node_modules/assert-plus/CHANGES.md @@ -0,0 +1,14 @@ +# assert-plus Changelog + +## 1.0.0 + +- *BREAKING* assert.number (and derivatives) now accept Infinity as valid input +- Add assert.finite check. Previous assert.number callers should use this if + they expect Infinity inputs to throw. + +## 0.2.0 + +- Fix `assert.object(null)` so it throws +- Fix optional/arrayOf exports for non-type-of asserts +- Add optiona/arrayOf exports for Stream/Date/Regex/uuid +- Add basic unit test coverage diff --git a/src/node_modules/assert-plus/README.md b/src/node_modules/assert-plus/README.md new file mode 100644 index 0000000..ec200d1 --- /dev/null +++ b/src/node_modules/assert-plus/README.md @@ -0,0 +1,162 @@ +# assert-plus + +This library is a super small wrapper over node's assert module that has two +things: (1) the ability to disable assertions with the environment variable +NODE\_NDEBUG, and (2) some API wrappers for argument testing. Like +`assert.string(myArg, 'myArg')`. 
As a simple example, most of my code looks +like this: + +```javascript + var assert = require('assert-plus'); + + function fooAccount(options, callback) { + assert.object(options, 'options'); + assert.number(options.id, 'options.id'); + assert.bool(options.isManager, 'options.isManager'); + assert.string(options.name, 'options.name'); + assert.arrayOfString(options.email, 'options.email'); + assert.func(callback, 'callback'); + + // Do stuff + callback(null, {}); + } +``` + +# API + +All methods that *aren't* part of node's core assert API are simply assumed to +take an argument, and then a string 'name' that's not a message; `AssertionError` +will be thrown if the assertion fails with a message like: + + AssertionError: foo (string) is required + at test (/home/mark/work/foo/foo.js:3:9) + at Object. (/home/mark/work/foo/foo.js:15:1) + at Module._compile (module.js:446:26) + at Object..js (module.js:464:10) + at Module.load (module.js:353:31) + at Function._load (module.js:311:12) + at Array.0 (module.js:484:10) + at EventEmitter._tickCallback (node.js:190:38) + +from: + +```javascript + function test(foo) { + assert.string(foo, 'foo'); + } +``` + +There you go. You can check that arrays are of a homogeneous type with `Arrayof$Type`: + +```javascript + function test(foo) { + assert.arrayOfString(foo, 'foo'); + } +``` + +You can assert IFF an argument is not `undefined` (i.e., an optional arg): + +```javascript + assert.optionalString(foo, 'foo'); +``` + +Lastly, you can opt-out of assertion checking altogether by setting the +environment variable `NODE_NDEBUG=1`. This is pseudo-useful if you have +lots of assertions, and don't want to pay `typeof ()` taxes to v8 in +production. Be advised: The standard functions re-exported from `assert` are +also disabled in assert-plus if NDEBUG is specified. Using them directly from +the `assert` module avoids this behavior. 
+ +The complete list of APIs is: + +* assert.array +* assert.bool +* assert.buffer +* assert.func +* assert.number +* assert.finite +* assert.object +* assert.string +* assert.stream +* assert.date +* assert.regexp +* assert.uuid +* assert.arrayOfArray +* assert.arrayOfBool +* assert.arrayOfBuffer +* assert.arrayOfFunc +* assert.arrayOfNumber +* assert.arrayOfFinite +* assert.arrayOfObject +* assert.arrayOfString +* assert.arrayOfStream +* assert.arrayOfDate +* assert.arrayOfRegexp +* assert.arrayOfUuid +* assert.optionalArray +* assert.optionalBool +* assert.optionalBuffer +* assert.optionalFunc +* assert.optionalNumber +* assert.optionalFinite +* assert.optionalObject +* assert.optionalString +* assert.optionalStream +* assert.optionalDate +* assert.optionalRegexp +* assert.optionalUuid +* assert.optionalArrayOfArray +* assert.optionalArrayOfBool +* assert.optionalArrayOfBuffer +* assert.optionalArrayOfFunc +* assert.optionalArrayOfNumber +* assert.optionalArrayOfFinite +* assert.optionalArrayOfObject +* assert.optionalArrayOfString +* assert.optionalArrayOfStream +* assert.optionalArrayOfDate +* assert.optionalArrayOfRegexp +* assert.optionalArrayOfUuid +* assert.AssertionError +* assert.fail +* assert.ok +* assert.equal +* assert.notEqual +* assert.deepEqual +* assert.notDeepEqual +* assert.strictEqual +* assert.notStrictEqual +* assert.throws +* assert.doesNotThrow +* assert.ifError + +# Installation + + npm install assert-plus + +## License + +The MIT License (MIT) +Copyright (c) 2012 Mark Cavage + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice 
and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +## Bugs + +See . diff --git a/src/node_modules/assert-plus/assert.js b/src/node_modules/assert-plus/assert.js new file mode 100644 index 0000000..26f944e --- /dev/null +++ b/src/node_modules/assert-plus/assert.js @@ -0,0 +1,211 @@ +// Copyright (c) 2012, Mark Cavage. All rights reserved. +// Copyright 2015 Joyent, Inc. + +var assert = require('assert'); +var Stream = require('stream').Stream; +var util = require('util'); + + +///--- Globals + +/* JSSTYLED */ +var UUID_REGEXP = /^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$/; + + +///--- Internal + +function _capitalize(str) { + return (str.charAt(0).toUpperCase() + str.slice(1)); +} + +function _toss(name, expected, oper, arg, actual) { + throw new assert.AssertionError({ + message: util.format('%s (%s) is required', name, expected), + actual: (actual === undefined) ? typeof (arg) : actual(arg), + expected: expected, + operator: oper || '===', + stackStartFunction: _toss.caller + }); +} + +function _getClass(arg) { + return (Object.prototype.toString.call(arg).slice(8, -1)); +} + +function noop() { + // Why even bother with asserts? 
+} + + +///--- Exports + +var types = { + bool: { + check: function (arg) { return typeof (arg) === 'boolean'; } + }, + func: { + check: function (arg) { return typeof (arg) === 'function'; } + }, + string: { + check: function (arg) { return typeof (arg) === 'string'; } + }, + object: { + check: function (arg) { + return typeof (arg) === 'object' && arg !== null; + } + }, + number: { + check: function (arg) { + return typeof (arg) === 'number' && !isNaN(arg); + } + }, + finite: { + check: function (arg) { + return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg); + } + }, + buffer: { + check: function (arg) { return Buffer.isBuffer(arg); }, + operator: 'Buffer.isBuffer' + }, + array: { + check: function (arg) { return Array.isArray(arg); }, + operator: 'Array.isArray' + }, + stream: { + check: function (arg) { return arg instanceof Stream; }, + operator: 'instanceof', + actual: _getClass + }, + date: { + check: function (arg) { return arg instanceof Date; }, + operator: 'instanceof', + actual: _getClass + }, + regexp: { + check: function (arg) { return arg instanceof RegExp; }, + operator: 'instanceof', + actual: _getClass + }, + uuid: { + check: function (arg) { + return typeof (arg) === 'string' && UUID_REGEXP.test(arg); + }, + operator: 'isUUID' + } +}; + +function _setExports(ndebug) { + var keys = Object.keys(types); + var out; + + /* re-export standard assert */ + if (process.env.NODE_NDEBUG) { + out = noop; + } else { + out = function (arg, msg) { + if (!arg) { + _toss(msg, 'true', arg); + } + }; + } + + /* standard checks */ + keys.forEach(function (k) { + if (ndebug) { + out[k] = noop; + return; + } + var type = types[k]; + out[k] = function (arg, msg) { + if (!type.check(arg)) { + _toss(msg, k, type.operator, arg, type.actual); + } + }; + }); + + /* optional checks */ + keys.forEach(function (k) { + var name = 'optional' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + out[name] = function (arg, msg) { 
+ if (arg === undefined || arg === null) { + return; + } + if (!type.check(arg)) { + _toss(msg, k, type.operator, arg, type.actual); + } + }; + }); + + /* arrayOf checks */ + keys.forEach(function (k) { + var name = 'arrayOf' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + var expected = '[' + k + ']'; + out[name] = function (arg, msg) { + if (!Array.isArray(arg)) { + _toss(msg, expected, type.operator, arg, type.actual); + } + var i; + for (i = 0; i < arg.length; i++) { + if (!type.check(arg[i])) { + _toss(msg, expected, type.operator, arg, type.actual); + } + } + }; + }); + + /* optionalArrayOf checks */ + keys.forEach(function (k) { + var name = 'optionalArrayOf' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + var expected = '[' + k + ']'; + out[name] = function (arg, msg) { + if (arg === undefined || arg === null) { + return; + } + if (!Array.isArray(arg)) { + _toss(msg, expected, type.operator, arg, type.actual); + } + var i; + for (i = 0; i < arg.length; i++) { + if (!type.check(arg[i])) { + _toss(msg, expected, type.operator, arg, type.actual); + } + } + }; + }); + + /* re-export built-in assertions */ + Object.keys(assert).forEach(function (k) { + if (k === 'AssertionError') { + out[k] = assert[k]; + return; + } + if (ndebug) { + out[k] = noop; + return; + } + out[k] = assert[k]; + }); + + /* export ourselves (for unit tests _only_) */ + out._setExports = _setExports; + + return out; +} + +module.exports = _setExports(process.env.NODE_NDEBUG); diff --git a/src/node_modules/assert-plus/package.json b/src/node_modules/assert-plus/package.json new file mode 100644 index 0000000..c0576fc --- /dev/null +++ b/src/node_modules/assert-plus/package.json @@ -0,0 +1,90 @@ +{ + "_args": [ + [ + "assert-plus@1.0.0", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "assert-plus@1.0.0", + "_id": "assert-plus@1.0.0", + 
"_inBundle": false, + "_integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "_location": "/assert-plus", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "assert-plus@1.0.0", + "name": "assert-plus", + "escapedName": "assert-plus", + "rawSpec": "1.0.0", + "saveSpec": null, + "fetchSpec": "1.0.0" + }, + "_requiredBy": [ + "/dashdash", + "/getpass", + "/http-signature", + "/jsprim", + "/sshpk", + "/verror" + ], + "_resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "_spec": "1.0.0", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Mark Cavage", + "email": "mcavage@gmail.com" + }, + "bugs": { + "url": "https://github.com/mcavage/node-assert-plus/issues" + }, + "contributors": [ + { + "name": "Dave Eddy", + "email": "dave@daveeddy.com" + }, + { + "name": "Fred Kuo", + "email": "fred.kuo@joyent.com" + }, + { + "name": "Lars-Magnus Skog", + "email": "ralphtheninja@riseup.net" + }, + { + "name": "Mark Cavage", + "email": "mcavage@gmail.com" + }, + { + "name": "Patrick Mooney", + "email": "pmooney@pfmooney.com" + }, + { + "name": "Rob Gulewich", + "email": "robert.gulewich@joyent.com" + } + ], + "dependencies": {}, + "description": "Extra assertions on top of node's assert module", + "devDependencies": { + "faucet": "0.0.1", + "tape": "4.2.2" + }, + "engines": { + "node": ">=0.8" + }, + "homepage": "https://github.com/mcavage/node-assert-plus#readme", + "license": "MIT", + "main": "./assert.js", + "name": "assert-plus", + "optionalDependencies": {}, + "repository": { + "type": "git", + "url": "git+https://github.com/mcavage/node-assert-plus.git" + }, + "scripts": { + "test": "tape tests/*.js | ./node_modules/.bin/faucet" + }, + "version": "1.0.0" +} diff --git a/src/node_modules/asynckit/LICENSE b/src/node_modules/asynckit/LICENSE new file mode 100644 index 0000000..c9eca5d --- /dev/null +++ 
b/src/node_modules/asynckit/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Alex Indigo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/node_modules/asynckit/README.md b/src/node_modules/asynckit/README.md new file mode 100644 index 0000000..ddcc7e6 --- /dev/null +++ b/src/node_modules/asynckit/README.md @@ -0,0 +1,233 @@ +# asynckit [![NPM Module](https://img.shields.io/npm/v/asynckit.svg?style=flat)](https://www.npmjs.com/package/asynckit) + +Minimal async jobs utility library, with streams support. 
+ +[![PhantomJS Build](https://img.shields.io/travis/alexindigo/asynckit/v0.4.0.svg?label=browser&style=flat)](https://travis-ci.org/alexindigo/asynckit) +[![Linux Build](https://img.shields.io/travis/alexindigo/asynckit/v0.4.0.svg?label=linux:0.12-6.x&style=flat)](https://travis-ci.org/alexindigo/asynckit) +[![Windows Build](https://img.shields.io/appveyor/ci/alexindigo/asynckit/v0.4.0.svg?label=windows:0.12-6.x&style=flat)](https://ci.appveyor.com/project/alexindigo/asynckit) + +[![Coverage Status](https://img.shields.io/coveralls/alexindigo/asynckit/v0.4.0.svg?label=code+coverage&style=flat)](https://coveralls.io/github/alexindigo/asynckit?branch=master) +[![Dependency Status](https://img.shields.io/david/alexindigo/asynckit/v0.4.0.svg?style=flat)](https://david-dm.org/alexindigo/asynckit) +[![bitHound Overall Score](https://www.bithound.io/github/alexindigo/asynckit/badges/score.svg)](https://www.bithound.io/github/alexindigo/asynckit) + + + +AsyncKit provides harness for `parallel` and `serial` iterators over list of items represented by arrays or objects. +Optionally it accepts abort function (should be synchronously return by iterator for each item), and terminates left over jobs upon an error event. For specific iteration order built-in (`ascending` and `descending`) and custom sort helpers also supported, via `asynckit.serialOrdered` method. + +It ensures async operations to keep behavior more stable and prevent `Maximum call stack size exceeded` errors, from sync iterators. + +| compression | size | +| :----------------- | -------: | +| asynckit.js | 12.34 kB | +| asynckit.min.js | 4.11 kB | +| asynckit.min.js.gz | 1.47 kB | + + +## Install + +```sh +$ npm install --save asynckit +``` + +## Examples + +### Parallel Jobs + +Runs iterator over provided array in parallel. Stores output in the `result` array, +on the matching positions. 
In unlikely event of an error from one of the jobs, +will terminate rest of the active jobs (if abort function is provided) +and return error along with salvaged data to the main callback function. + +#### Input Array + +```javascript +var parallel = require('asynckit').parallel + , assert = require('assert') + ; + +var source = [ 1, 1, 4, 16, 64, 32, 8, 2 ] + , expectedResult = [ 2, 2, 8, 32, 128, 64, 16, 4 ] + , expectedTarget = [ 1, 1, 2, 4, 8, 16, 32, 64 ] + , target = [] + ; + +parallel(source, asyncJob, function(err, result) +{ + assert.deepEqual(result, expectedResult); + assert.deepEqual(target, expectedTarget); +}); + +// async job accepts one element from the array +// and a callback function +function asyncJob(item, cb) +{ + // different delays (in ms) per item + var delay = item * 25; + + // pretend different jobs take different time to finish + // and not in consequential order + var timeoutId = setTimeout(function() { + target.push(item); + cb(null, item * 2); + }, delay); + + // allow to cancel "leftover" jobs upon error + // return function, invoking of which will abort this job + return clearTimeout.bind(null, timeoutId); +} +``` + +More examples could be found in [test/test-parallel-array.js](test/test-parallel-array.js). + +#### Input Object + +Also it supports named jobs, listed via object. 
+ +```javascript +var parallel = require('asynckit/parallel') + , assert = require('assert') + ; + +var source = { first: 1, one: 1, four: 4, sixteen: 16, sixtyFour: 64, thirtyTwo: 32, eight: 8, two: 2 } + , expectedResult = { first: 2, one: 2, four: 8, sixteen: 32, sixtyFour: 128, thirtyTwo: 64, eight: 16, two: 4 } + , expectedTarget = [ 1, 1, 2, 4, 8, 16, 32, 64 ] + , expectedKeys = [ 'first', 'one', 'two', 'four', 'eight', 'sixteen', 'thirtyTwo', 'sixtyFour' ] + , target = [] + , keys = [] + ; + +parallel(source, asyncJob, function(err, result) +{ + assert.deepEqual(result, expectedResult); + assert.deepEqual(target, expectedTarget); + assert.deepEqual(keys, expectedKeys); +}); + +// supports full value, key, callback (shortcut) interface +function asyncJob(item, key, cb) +{ + // different delays (in ms) per item + var delay = item * 25; + + // pretend different jobs take different time to finish + // and not in consequential order + var timeoutId = setTimeout(function() { + keys.push(key); + target.push(item); + cb(null, item * 2); + }, delay); + + // allow to cancel "leftover" jobs upon error + // return function, invoking of which will abort this job + return clearTimeout.bind(null, timeoutId); +} +``` + +More examples could be found in [test/test-parallel-object.js](test/test-parallel-object.js). + +### Serial Jobs + +Runs iterator over provided array sequentially. Stores output in the `result` array, +on the matching positions. In unlikely event of an error from one of the jobs, +will not proceed to the rest of the items in the list +and return error along with salvaged data to the main callback function. 
+ +#### Input Array + +```javascript +var serial = require('asynckit/serial') + , assert = require('assert') + ; + +var source = [ 1, 1, 4, 16, 64, 32, 8, 2 ] + , expectedResult = [ 2, 2, 8, 32, 128, 64, 16, 4 ] + , expectedTarget = [ 0, 1, 2, 3, 4, 5, 6, 7 ] + , target = [] + ; + +serial(source, asyncJob, function(err, result) +{ + assert.deepEqual(result, expectedResult); + assert.deepEqual(target, expectedTarget); +}); + +// extended interface (item, key, callback) +// also supported for arrays +function asyncJob(item, key, cb) +{ + target.push(key); + + // it will be automatically made async + // even it iterator "returns" in the same event loop + cb(null, item * 2); +} +``` + +More examples could be found in [test/test-serial-array.js](test/test-serial-array.js). + +#### Input Object + +Also it supports named jobs, listed via object. + +```javascript +var serial = require('asynckit').serial + , assert = require('assert') + ; + +var source = [ 1, 1, 4, 16, 64, 32, 8, 2 ] + , expectedResult = [ 2, 2, 8, 32, 128, 64, 16, 4 ] + , expectedTarget = [ 0, 1, 2, 3, 4, 5, 6, 7 ] + , target = [] + ; + +var source = { first: 1, one: 1, four: 4, sixteen: 16, sixtyFour: 64, thirtyTwo: 32, eight: 8, two: 2 } + , expectedResult = { first: 2, one: 2, four: 8, sixteen: 32, sixtyFour: 128, thirtyTwo: 64, eight: 16, two: 4 } + , expectedTarget = [ 1, 1, 4, 16, 64, 32, 8, 2 ] + , target = [] + ; + + +serial(source, asyncJob, function(err, result) +{ + assert.deepEqual(result, expectedResult); + assert.deepEqual(target, expectedTarget); +}); + +// shortcut interface (item, callback) +// works for object as well as for the arrays +function asyncJob(item, cb) +{ + target.push(item); + + // it will be automatically made async + // even it iterator "returns" in the same event loop + cb(null, item * 2); +} +``` + +More examples could be found in [test/test-serial-object.js](test/test-serial-object.js). 
+ +_Note: Since _object_ is an _unordered_ collection of properties, +it may produce unexpected results with sequential iterations. +Whenever order of the jobs' execution is important please use `serialOrdered` method._ + +### Ordered Serial Iterations + +TBD + +For example [compare-property](compare-property) package. + +### Streaming interface + +TBD + +## Want to Know More? + +More examples can be found in [test folder](test/). + +Or open an [issue](https://github.com/alexindigo/asynckit/issues) with questions and/or suggestions. + +## License + +AsyncKit is licensed under the MIT license. diff --git a/src/node_modules/asynckit/bench.js b/src/node_modules/asynckit/bench.js new file mode 100644 index 0000000..c612f1a --- /dev/null +++ b/src/node_modules/asynckit/bench.js @@ -0,0 +1,76 @@ +/* eslint no-console: "off" */ + +var asynckit = require('./') + , async = require('async') + , assert = require('assert') + , expected = 0 + ; + +var Benchmark = require('benchmark'); +var suite = new Benchmark.Suite; + +var source = []; +for (var z = 1; z < 100; z++) +{ + source.push(z); + expected += z; +} + +suite +// add tests + +.add('async.map', function(deferred) +{ + var total = 0; + + async.map(source, + function(i, cb) + { + setImmediate(function() + { + total += i; + cb(null, total); + }); + }, + function(err, result) + { + assert.ifError(err); + assert.equal(result[result.length - 1], expected); + deferred.resolve(); + }); +}, {'defer': true}) + + +.add('asynckit.parallel', function(deferred) +{ + var total = 0; + + asynckit.parallel(source, + function(i, cb) + { + setImmediate(function() + { + total += i; + cb(null, total); + }); + }, + function(err, result) + { + assert.ifError(err); + assert.equal(result[result.length - 1], expected); + deferred.resolve(); + }); +}, {'defer': true}) + + +// add listeners +.on('cycle', function(ev) +{ + console.log(String(ev.target)); +}) +.on('complete', function() +{ + console.log('Fastest is ' + 
this.filter('fastest').map('name')); +}) +// run async +.run({ 'async': true }); diff --git a/src/node_modules/asynckit/index.js b/src/node_modules/asynckit/index.js new file mode 100644 index 0000000..455f945 --- /dev/null +++ b/src/node_modules/asynckit/index.js @@ -0,0 +1,6 @@ +module.exports = +{ + parallel : require('./parallel.js'), + serial : require('./serial.js'), + serialOrdered : require('./serialOrdered.js') +}; diff --git a/src/node_modules/asynckit/lib/abort.js b/src/node_modules/asynckit/lib/abort.js new file mode 100644 index 0000000..114367e --- /dev/null +++ b/src/node_modules/asynckit/lib/abort.js @@ -0,0 +1,29 @@ +// API +module.exports = abort; + +/** + * Aborts leftover active jobs + * + * @param {object} state - current state object + */ +function abort(state) +{ + Object.keys(state.jobs).forEach(clean.bind(state)); + + // reset leftover jobs + state.jobs = {}; +} + +/** + * Cleans up leftover job by invoking abort function for the provided job id + * + * @this state + * @param {string|number} key - job id to abort + */ +function clean(key) +{ + if (typeof this.jobs[key] == 'function') + { + this.jobs[key](); + } +} diff --git a/src/node_modules/asynckit/lib/async.js b/src/node_modules/asynckit/lib/async.js new file mode 100644 index 0000000..7f1288a --- /dev/null +++ b/src/node_modules/asynckit/lib/async.js @@ -0,0 +1,34 @@ +var defer = require('./defer.js'); + +// API +module.exports = async; + +/** + * Runs provided callback asynchronously + * even if callback itself is not + * + * @param {function} callback - callback to invoke + * @returns {function} - augmented callback + */ +function async(callback) +{ + var isAsync = false; + + // check if async happened + defer(function() { isAsync = true; }); + + return function async_callback(err, result) + { + if (isAsync) + { + callback(err, result); + } + else + { + defer(function nextTick_callback() + { + callback(err, result); + }); + } + }; +} diff --git 
a/src/node_modules/asynckit/lib/defer.js b/src/node_modules/asynckit/lib/defer.js new file mode 100644 index 0000000..b67110c --- /dev/null +++ b/src/node_modules/asynckit/lib/defer.js @@ -0,0 +1,26 @@ +module.exports = defer; + +/** + * Runs provided function on next iteration of the event loop + * + * @param {function} fn - function to run + */ +function defer(fn) +{ + var nextTick = typeof setImmediate == 'function' + ? setImmediate + : ( + typeof process == 'object' && typeof process.nextTick == 'function' + ? process.nextTick + : null + ); + + if (nextTick) + { + nextTick(fn); + } + else + { + setTimeout(fn, 0); + } +} diff --git a/src/node_modules/asynckit/lib/iterate.js b/src/node_modules/asynckit/lib/iterate.js new file mode 100644 index 0000000..5d2839a --- /dev/null +++ b/src/node_modules/asynckit/lib/iterate.js @@ -0,0 +1,75 @@ +var async = require('./async.js') + , abort = require('./abort.js') + ; + +// API +module.exports = iterate; + +/** + * Iterates over each job object + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {object} state - current job status + * @param {function} callback - invoked when all elements processed + */ +function iterate(list, iterator, state, callback) +{ + // store current index + var key = state['keyedList'] ? 
state['keyedList'][state.index] : state.index; + + state.jobs[key] = runJob(iterator, key, list[key], function(error, output) + { + // don't repeat yourself + // skip secondary callbacks + if (!(key in state.jobs)) + { + return; + } + + // clean up jobs + delete state.jobs[key]; + + if (error) + { + // don't process rest of the results + // stop still active jobs + // and reset the list + abort(state); + } + else + { + state.results[key] = output; + } + + // return salvaged results + callback(error, state.results); + }); +} + +/** + * Runs iterator over provided job element + * + * @param {function} iterator - iterator to invoke + * @param {string|number} key - key/index of the element in the list of jobs + * @param {mixed} item - job description + * @param {function} callback - invoked after iterator is done with the job + * @returns {function|mixed} - job abort function or something else + */ +function runJob(iterator, key, item, callback) +{ + var aborter; + + // allow shortcut if iterator expects only two arguments + if (iterator.length == 2) + { + aborter = iterator(item, async(callback)); + } + // otherwise go with full three arguments + else + { + aborter = iterator(item, key, async(callback)); + } + + return aborter; +} diff --git a/src/node_modules/asynckit/lib/readable_asynckit.js b/src/node_modules/asynckit/lib/readable_asynckit.js new file mode 100644 index 0000000..78ad240 --- /dev/null +++ b/src/node_modules/asynckit/lib/readable_asynckit.js @@ -0,0 +1,91 @@ +var streamify = require('./streamify.js') + , defer = require('./defer.js') + ; + +// API +module.exports = ReadableAsyncKit; + +/** + * Base constructor for all streams + * used to hold properties/methods + */ +function ReadableAsyncKit() +{ + ReadableAsyncKit.super_.apply(this, arguments); + + // list of active jobs + this.jobs = {}; + + // add stream methods + this.destroy = destroy; + this._start = _start; + this._read = _read; +} + +/** + * Destroys readable stream, + * by aborting 
outstanding jobs + * + * @returns {void} + */ +function destroy() +{ + if (this.destroyed) + { + return; + } + + this.destroyed = true; + + if (typeof this.terminator == 'function') + { + this.terminator(); + } +} + +/** + * Starts provided jobs in async manner + * + * @private + */ +function _start() +{ + // first argument – runner function + var runner = arguments[0] + // take away first argument + , args = Array.prototype.slice.call(arguments, 1) + // second argument - input data + , input = args[0] + // last argument - result callback + , endCb = streamify.callback.call(this, args[args.length - 1]) + ; + + args[args.length - 1] = endCb; + // third argument - iterator + args[1] = streamify.iterator.call(this, args[1]); + + // allow time for proper setup + defer(function() + { + if (!this.destroyed) + { + this.terminator = runner.apply(null, args); + } + else + { + endCb(null, Array.isArray(input) ? [] : {}); + } + }.bind(this)); +} + + +/** + * Implement _read to comply with Readable streams + * Doesn't really make sense for flowing object mode + * + * @private + */ +function _read() +{ + +} diff --git a/src/node_modules/asynckit/lib/readable_parallel.js b/src/node_modules/asynckit/lib/readable_parallel.js new file mode 100644 index 0000000..5d2929f --- /dev/null +++ b/src/node_modules/asynckit/lib/readable_parallel.js @@ -0,0 +1,25 @@ +var parallel = require('../parallel.js'); + +// API +module.exports = ReadableParallel; + +/** + * Streaming wrapper to `asynckit.parallel` + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {stream.Readable#} + */ +function ReadableParallel(list, iterator, callback) +{ + if (!(this instanceof ReadableParallel)) + { + return new ReadableParallel(list, iterator, callback); + } + + // turn on object mode + ReadableParallel.super_.call(this, {objectMode: true}); + + 
this._start(parallel, list, iterator, callback); +} diff --git a/src/node_modules/asynckit/lib/readable_serial.js b/src/node_modules/asynckit/lib/readable_serial.js new file mode 100644 index 0000000..7822698 --- /dev/null +++ b/src/node_modules/asynckit/lib/readable_serial.js @@ -0,0 +1,25 @@ +var serial = require('../serial.js'); + +// API +module.exports = ReadableSerial; + +/** + * Streaming wrapper to `asynckit.serial` + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {stream.Readable#} + */ +function ReadableSerial(list, iterator, callback) +{ + if (!(this instanceof ReadableSerial)) + { + return new ReadableSerial(list, iterator, callback); + } + + // turn on object mode + ReadableSerial.super_.call(this, {objectMode: true}); + + this._start(serial, list, iterator, callback); +} diff --git a/src/node_modules/asynckit/lib/readable_serial_ordered.js b/src/node_modules/asynckit/lib/readable_serial_ordered.js new file mode 100644 index 0000000..3de89c4 --- /dev/null +++ b/src/node_modules/asynckit/lib/readable_serial_ordered.js @@ -0,0 +1,29 @@ +var serialOrdered = require('../serialOrdered.js'); + +// API +module.exports = ReadableSerialOrdered; +// expose sort helpers +module.exports.ascending = serialOrdered.ascending; +module.exports.descending = serialOrdered.descending; + +/** + * Streaming wrapper to `asynckit.serialOrdered` + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} sortMethod - custom sort function + * @param {function} callback - invoked when all elements processed + * @returns {stream.Readable#} + */ +function ReadableSerialOrdered(list, iterator, sortMethod, callback) +{ + if (!(this instanceof ReadableSerialOrdered)) + { + return new ReadableSerialOrdered(list, iterator, 
sortMethod, callback); + } + + // turn on object mode + ReadableSerialOrdered.super_.call(this, {objectMode: true}); + + this._start(serialOrdered, list, iterator, sortMethod, callback); +} diff --git a/src/node_modules/asynckit/lib/state.js b/src/node_modules/asynckit/lib/state.js new file mode 100644 index 0000000..cbea7ad --- /dev/null +++ b/src/node_modules/asynckit/lib/state.js @@ -0,0 +1,37 @@ +// API +module.exports = state; + +/** + * Creates initial state object + * for iteration over list + * + * @param {array|object} list - list to iterate over + * @param {function|null} sortMethod - function to use for keys sort, + * or `null` to keep them as is + * @returns {object} - initial state object + */ +function state(list, sortMethod) +{ + var isNamedList = !Array.isArray(list) + , initState = + { + index : 0, + keyedList: isNamedList || sortMethod ? Object.keys(list) : null, + jobs : {}, + results : isNamedList ? {} : [], + size : isNamedList ? Object.keys(list).length : list.length + } + ; + + if (sortMethod) + { + // sort array keys based on it's values + // sort object's keys just on own merit + initState.keyedList.sort(isNamedList ? 
sortMethod : function(a, b) + { + return sortMethod(list[a], list[b]); + }); + } + + return initState; +} diff --git a/src/node_modules/asynckit/lib/streamify.js b/src/node_modules/asynckit/lib/streamify.js new file mode 100644 index 0000000..f56a1c9 --- /dev/null +++ b/src/node_modules/asynckit/lib/streamify.js @@ -0,0 +1,141 @@ +var async = require('./async.js'); + +// API +module.exports = { + iterator: wrapIterator, + callback: wrapCallback +}; + +/** + * Wraps iterators with long signature + * + * @this ReadableAsyncKit# + * @param {function} iterator - function to wrap + * @returns {function} - wrapped function + */ +function wrapIterator(iterator) +{ + var stream = this; + + return function(item, key, cb) + { + var aborter + , wrappedCb = async(wrapIteratorCallback.call(stream, cb, key)) + ; + + stream.jobs[key] = wrappedCb; + + // it's either shortcut (item, cb) + if (iterator.length == 2) + { + aborter = iterator(item, wrappedCb); + } + // or long format (item, key, cb) + else + { + aborter = iterator(item, key, wrappedCb); + } + + return aborter; + }; +} + +/** + * Wraps provided callback function + * allowing to execute snitch function before + * real callback + * + * @this ReadableAsyncKit# + * @param {function} callback - function to wrap + * @returns {function} - wrapped function + */ +function wrapCallback(callback) +{ + var stream = this; + + var wrapped = function(error, result) + { + return finisher.call(stream, error, result, callback); + }; + + return wrapped; +} + +/** + * Wraps provided iterator callback function + * makes sure snitch only called once, + * but passes secondary calls to the original callback + * + * @this ReadableAsyncKit# + * @param {function} callback - callback to wrap + * @param {number|string} key - iteration key + * @returns {function} wrapped callback + */ +function wrapIteratorCallback(callback, key) +{ + var stream = this; + + return function(error, output) + { + // don't repeat yourself + if (!(key in stream.jobs)) + 
{ + callback(error, output); + return; + } + + // clean up jobs + delete stream.jobs[key]; + + return streamer.call(stream, error, {key: key, value: output}, callback); + }; +} + +/** + * Stream wrapper for iterator callback + * + * @this ReadableAsyncKit# + * @param {mixed} error - error response + * @param {mixed} output - iterator output + * @param {function} callback - callback that expects iterator results + */ +function streamer(error, output, callback) +{ + if (error && !this.error) + { + this.error = error; + this.pause(); + this.emit('error', error); + // send back value only, as expected + callback(error, output && output.value); + return; + } + + // stream stuff + this.push(output); + + // back to original track + // send back value only, as expected + callback(error, output && output.value); +} + +/** + * Stream wrapper for finishing callback + * + * @this ReadableAsyncKit# + * @param {mixed} error - error response + * @param {mixed} output - iterator output + * @param {function} callback - callback that expects final results + */ +function finisher(error, output, callback) +{ + // signal end of the stream + // only for successfully finished streams + if (!error) + { + this.push(null); + } + + // back to original track + callback(error, output); +} diff --git a/src/node_modules/asynckit/lib/terminator.js b/src/node_modules/asynckit/lib/terminator.js new file mode 100644 index 0000000..d6eb992 --- /dev/null +++ b/src/node_modules/asynckit/lib/terminator.js @@ -0,0 +1,29 @@ +var abort = require('./abort.js') + , async = require('./async.js') + ; + +// API +module.exports = terminator; + +/** + * Terminates jobs in the attached state context + * + * @this AsyncKitState# + * @param {function} callback - final callback to invoke after termination + */ +function terminator(callback) +{ + if (!Object.keys(this.jobs).length) + { + return; + } + + // fast forward iteration index + this.index = this.size; + + // abort jobs + abort(this); + + // send back results 
we have so far + async(callback)(null, this.results); +} diff --git a/src/node_modules/asynckit/package.json b/src/node_modules/asynckit/package.json new file mode 100644 index 0000000..47bb567 --- /dev/null +++ b/src/node_modules/asynckit/package.json @@ -0,0 +1,94 @@ +{ + "_args": [ + [ + "asynckit@0.4.0", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "asynckit@0.4.0", + "_id": "asynckit@0.4.0", + "_inBundle": false, + "_integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", + "_location": "/asynckit", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "asynckit@0.4.0", + "name": "asynckit", + "escapedName": "asynckit", + "rawSpec": "0.4.0", + "saveSpec": null, + "fetchSpec": "0.4.0" + }, + "_requiredBy": [ + "/form-data" + ], + "_resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "_spec": "0.4.0", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Alex Indigo", + "email": "iam@alexindigo.com" + }, + "bugs": { + "url": "https://github.com/alexindigo/asynckit/issues" + }, + "dependencies": {}, + "description": "Minimal async jobs utility library, with streams support", + "devDependencies": { + "browserify": "^13.0.0", + "browserify-istanbul": "^2.0.0", + "coveralls": "^2.11.9", + "eslint": "^2.9.0", + "istanbul": "^0.4.3", + "obake": "^0.1.2", + "phantomjs-prebuilt": "^2.1.7", + "pre-commit": "^1.1.3", + "reamde": "^1.1.0", + "rimraf": "^2.5.2", + "size-table": "^0.2.0", + "tap-spec": "^4.1.1", + "tape": "^4.5.1" + }, + "homepage": "https://github.com/alexindigo/asynckit#readme", + "keywords": [ + "async", + "jobs", + "parallel", + "serial", + "iterator", + "array", + "object", + "stream", + "destroy", + "terminate", + "abort" + ], + "license": "MIT", + "main": "index.js", + "name": "asynckit", + "pre-commit": [ + "clean", + "lint", + "test", + "browser", + 
"report", + "size" + ], + "repository": { + "type": "git", + "url": "git+https://github.com/alexindigo/asynckit.git" + }, + "scripts": { + "browser": "browserify -t browserify-istanbul test/lib/browserify_adjustment.js test/test-*.js | obake --coverage | tap-spec", + "clean": "rimraf coverage", + "debug": "tape test/test-*.js", + "lint": "eslint *.js lib/*.js test/*.js", + "report": "istanbul report", + "size": "browserify index.js | size-table asynckit", + "test": "istanbul cover --reporter=json tape -- 'test/test-*.js' | tap-spec", + "win-test": "tape test/test-*.js" + }, + "version": "0.4.0" +} diff --git a/src/node_modules/asynckit/parallel.js b/src/node_modules/asynckit/parallel.js new file mode 100644 index 0000000..3c50344 --- /dev/null +++ b/src/node_modules/asynckit/parallel.js @@ -0,0 +1,43 @@ +var iterate = require('./lib/iterate.js') + , initState = require('./lib/state.js') + , terminator = require('./lib/terminator.js') + ; + +// Public API +module.exports = parallel; + +/** + * Runs iterator over provided array elements in parallel + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function parallel(list, iterator, callback) +{ + var state = initState(list); + + while (state.index < (state['keyedList'] || list).length) + { + iterate(list, iterator, state, function(error, result) + { + if (error) + { + callback(error, result); + return; + } + + // looks like it's the last one + if (Object.keys(state.jobs).length === 0) + { + callback(null, state.results); + return; + } + }); + + state.index++; + } + + return terminator.bind(state, callback); +} diff --git a/src/node_modules/asynckit/serial.js b/src/node_modules/asynckit/serial.js new file mode 100644 index 0000000..6cd949a --- /dev/null +++ b/src/node_modules/asynckit/serial.js @@ -0,0 +1,17 @@ +var 
serialOrdered = require('./serialOrdered.js'); + +// Public API +module.exports = serial; + +/** + * Runs iterator over provided array elements in series + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function serial(list, iterator, callback) +{ + return serialOrdered(list, iterator, null, callback); +} diff --git a/src/node_modules/asynckit/serialOrdered.js b/src/node_modules/asynckit/serialOrdered.js new file mode 100644 index 0000000..607eafe --- /dev/null +++ b/src/node_modules/asynckit/serialOrdered.js @@ -0,0 +1,75 @@ +var iterate = require('./lib/iterate.js') + , initState = require('./lib/state.js') + , terminator = require('./lib/terminator.js') + ; + +// Public API +module.exports = serialOrdered; +// sorting helpers +module.exports.ascending = ascending; +module.exports.descending = descending; + +/** + * Runs iterator over provided sorted array elements in series + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} sortMethod - custom sort function + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function serialOrdered(list, iterator, sortMethod, callback) +{ + var state = initState(list, sortMethod); + + iterate(list, iterator, state, function iteratorHandler(error, result) + { + if (error) + { + callback(error, result); + return; + } + + state.index++; + + // are we there yet? 
+ if (state.index < (state['keyedList'] || list).length) + { + iterate(list, iterator, state, iteratorHandler); + return; + } + + // done here + callback(null, state.results); + }); + + return terminator.bind(state, callback); +} + +/* + * -- Sort methods + */ + +/** + * sort helper to sort array elements in ascending order + * + * @param {mixed} a - an item to compare + * @param {mixed} b - an item to compare + * @returns {number} - comparison result + */ +function ascending(a, b) +{ + return a < b ? -1 : a > b ? 1 : 0; +} + +/** + * sort helper to sort array elements in descending order + * + * @param {mixed} a - an item to compare + * @param {mixed} b - an item to compare + * @returns {number} - comparison result + */ +function descending(a, b) +{ + return -1 * ascending(a, b); +} diff --git a/src/node_modules/asynckit/stream.js b/src/node_modules/asynckit/stream.js new file mode 100644 index 0000000..d43465f --- /dev/null +++ b/src/node_modules/asynckit/stream.js @@ -0,0 +1,21 @@ +var inherits = require('util').inherits + , Readable = require('stream').Readable + , ReadableAsyncKit = require('./lib/readable_asynckit.js') + , ReadableParallel = require('./lib/readable_parallel.js') + , ReadableSerial = require('./lib/readable_serial.js') + , ReadableSerialOrdered = require('./lib/readable_serial_ordered.js') + ; + +// API +module.exports = +{ + parallel : ReadableParallel, + serial : ReadableSerial, + serialOrdered : ReadableSerialOrdered, +}; + +inherits(ReadableAsyncKit, Readable); + +inherits(ReadableParallel, ReadableAsyncKit); +inherits(ReadableSerial, ReadableAsyncKit); +inherits(ReadableSerialOrdered, ReadableAsyncKit); diff --git a/src/node_modules/aws-sign2/LICENSE b/src/node_modules/aws-sign2/LICENSE new file mode 100644 index 0000000..a4a9aee --- /dev/null +++ b/src/node_modules/aws-sign2/LICENSE @@ -0,0 +1,55 @@ +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/src/node_modules/aws-sign2/README.md b/src/node_modules/aws-sign2/README.md new file mode 100644 index 0000000..763564e --- /dev/null +++ b/src/node_modules/aws-sign2/README.md @@ -0,0 +1,4 @@ +aws-sign +======== + +AWS signing. Originally pulled from LearnBoost/knox, maintained as vendor in request, now a standalone module. diff --git a/src/node_modules/aws-sign2/index.js b/src/node_modules/aws-sign2/index.js new file mode 100644 index 0000000..fb35f6d --- /dev/null +++ b/src/node_modules/aws-sign2/index.js @@ -0,0 +1,212 @@ + +/*! + * Copyright 2010 LearnBoost + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Module dependencies. + */ + +var crypto = require('crypto') + , parse = require('url').parse + ; + +/** + * Valid keys. 
+ */ + +var keys = + [ 'acl' + , 'location' + , 'logging' + , 'notification' + , 'partNumber' + , 'policy' + , 'requestPayment' + , 'torrent' + , 'uploadId' + , 'uploads' + , 'versionId' + , 'versioning' + , 'versions' + , 'website' + ] + +/** + * Return an "Authorization" header value with the given `options` + * in the form of "AWS :" + * + * @param {Object} options + * @return {String} + * @api private + */ + +function authorization (options) { + return 'AWS ' + options.key + ':' + sign(options) +} + +module.exports = authorization +module.exports.authorization = authorization + +/** + * Simple HMAC-SHA1 Wrapper + * + * @param {Object} options + * @return {String} + * @api private + */ + +function hmacSha1 (options) { + return crypto.createHmac('sha1', options.secret).update(options.message).digest('base64') +} + +module.exports.hmacSha1 = hmacSha1 + +/** + * Create a base64 sha1 HMAC for `options`. + * + * @param {Object} options + * @return {String} + * @api private + */ + +function sign (options) { + options.message = stringToSign(options) + return hmacSha1(options) +} +module.exports.sign = sign + +/** + * Create a base64 sha1 HMAC for `options`. + * + * Specifically to be used with S3 presigned URLs + * + * @param {Object} options + * @return {String} + * @api private + */ + +function signQuery (options) { + options.message = queryStringToSign(options) + return hmacSha1(options) +} +module.exports.signQuery= signQuery + +/** + * Return a string for sign() with the given `options`. + * + * Spec: + * + * \n + * \n + * \n + * \n + * [headers\n] + * + * + * @param {Object} options + * @return {String} + * @api private + */ + +function stringToSign (options) { + var headers = options.amazonHeaders || '' + if (headers) headers += '\n' + var r = + [ options.verb + , options.md5 + , options.contentType + , options.date ? 
options.date.toUTCString() : '' + , headers + options.resource + ] + return r.join('\n') +} +module.exports.stringToSign = stringToSign + +/** + * Return a string for sign() with the given `options`, but is meant exclusively + * for S3 presigned URLs + * + * Spec: + * + * \n + * + * + * @param {Object} options + * @return {String} + * @api private + */ + +function queryStringToSign (options){ + return 'GET\n\n\n' + options.date + '\n' + options.resource +} +module.exports.queryStringToSign = queryStringToSign + +/** + * Perform the following: + * + * - ignore non-amazon headers + * - lowercase fields + * - sort lexicographically + * - trim whitespace between ":" + * - join with newline + * + * @param {Object} headers + * @return {String} + * @api private + */ + +function canonicalizeHeaders (headers) { + var buf = [] + , fields = Object.keys(headers) + ; + for (var i = 0, len = fields.length; i < len; ++i) { + var field = fields[i] + , val = headers[field] + , field = field.toLowerCase() + ; + if (0 !== field.indexOf('x-amz')) continue + buf.push(field + ':' + val) + } + return buf.sort().join('\n') +} +module.exports.canonicalizeHeaders = canonicalizeHeaders + +/** + * Perform the following: + * + * - ignore non sub-resources + * - sort lexicographically + * + * @param {String} resource + * @return {String} + * @api private + */ + +function canonicalizeResource (resource) { + var url = parse(resource, true) + , path = url.pathname + , buf = [] + ; + + Object.keys(url.query).forEach(function(key){ + if (!~keys.indexOf(key)) return + var val = '' == url.query[key] ? '' : '=' + encodeURIComponent(url.query[key]) + buf.push(key + val) + }) + + return path + (buf.length ? '?' 
+ buf.sort().join('&') : '') +} +module.exports.canonicalizeResource = canonicalizeResource diff --git a/src/node_modules/aws-sign2/package.json b/src/node_modules/aws-sign2/package.json new file mode 100644 index 0000000..ab36553 --- /dev/null +++ b/src/node_modules/aws-sign2/package.json @@ -0,0 +1,53 @@ +{ + "_args": [ + [ + "aws-sign2@0.7.0", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "aws-sign2@0.7.0", + "_id": "aws-sign2@0.7.0", + "_inBundle": false, + "_integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "_location": "/aws-sign2", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "aws-sign2@0.7.0", + "name": "aws-sign2", + "escapedName": "aws-sign2", + "rawSpec": "0.7.0", + "saveSpec": null, + "fetchSpec": "0.7.0" + }, + "_requiredBy": [ + "/request" + ], + "_resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "_spec": "0.7.0", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Mikeal Rogers", + "email": "mikeal.rogers@gmail.com", + "url": "http://www.futurealoof.com" + }, + "bugs": { + "url": "https://github.com/mikeal/aws-sign/issues" + }, + "dependencies": {}, + "description": "AWS signing. 
Originally pulled from LearnBoost/knox, maintained as vendor in request, now a standalone module.", + "devDependencies": {}, + "engines": { + "node": "*" + }, + "homepage": "https://github.com/mikeal/aws-sign#readme", + "license": "Apache-2.0", + "main": "index.js", + "name": "aws-sign2", + "optionalDependencies": {}, + "repository": { + "url": "git+https://github.com/mikeal/aws-sign.git" + }, + "version": "0.7.0" +} diff --git a/src/node_modules/aws4/.travis.yml b/src/node_modules/aws4/.travis.yml new file mode 100644 index 0000000..61d0634 --- /dev/null +++ b/src/node_modules/aws4/.travis.yml @@ -0,0 +1,5 @@ +language: node_js +node_js: + - "0.10" + - "0.12" + - "4.2" diff --git a/src/node_modules/aws4/LICENSE b/src/node_modules/aws4/LICENSE new file mode 100644 index 0000000..4f321e5 --- /dev/null +++ b/src/node_modules/aws4/LICENSE @@ -0,0 +1,19 @@ +Copyright 2013 Michael Hart (michael.hart.au@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/src/node_modules/aws4/README.md b/src/node_modules/aws4/README.md new file mode 100644 index 0000000..6b002d0 --- /dev/null +++ b/src/node_modules/aws4/README.md @@ -0,0 +1,523 @@ +aws4 +---- + +[![Build Status](https://secure.travis-ci.org/mhart/aws4.png?branch=master)](http://travis-ci.org/mhart/aws4) + +A small utility to sign vanilla node.js http(s) request options using Amazon's +[AWS Signature Version 4](http://docs.amazonwebservices.com/general/latest/gr/signature-version-4.html). + +Can also be used [in the browser](./browser). + +This signature is supported by nearly all Amazon services, including +[S3](http://docs.aws.amazon.com/AmazonS3/latest/API/), +[EC2](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/), +[DynamoDB](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/API.html), +[Kinesis](http://docs.aws.amazon.com/kinesis/latest/APIReference/), +[Lambda](http://docs.aws.amazon.com/lambda/latest/dg/API_Reference.html), +[SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/), +[SNS](http://docs.aws.amazon.com/sns/latest/api/), +[IAM](http://docs.aws.amazon.com/IAM/latest/APIReference/), +[STS](http://docs.aws.amazon.com/STS/latest/APIReference/), +[RDS](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/), +[CloudWatch](http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/), +[CloudWatch Logs](http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/), +[CodeDeploy](http://docs.aws.amazon.com/codedeploy/latest/APIReference/), +[CloudFront](http://docs.aws.amazon.com/AmazonCloudFront/latest/APIReference/), +[CloudTrail](http://docs.aws.amazon.com/awscloudtrail/latest/APIReference/), +[ElastiCache](http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/), +[EMR](http://docs.aws.amazon.com/ElasticMapReduce/latest/API/), +[Glacier](http://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-api.html), 
+[CloudSearch](http://docs.aws.amazon.com/cloudsearch/latest/developerguide/APIReq.html), +[Elastic Load Balancing](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/), +[Elastic Transcoder](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/api-reference.html), +[CloudFormation](http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/), +[Elastic Beanstalk](http://docs.aws.amazon.com/elasticbeanstalk/latest/api/), +[Storage Gateway](http://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html), +[Data Pipeline](http://docs.aws.amazon.com/datapipeline/latest/APIReference/), +[Direct Connect](http://docs.aws.amazon.com/directconnect/latest/APIReference/), +[Redshift](http://docs.aws.amazon.com/redshift/latest/APIReference/), +[OpsWorks](http://docs.aws.amazon.com/opsworks/latest/APIReference/), +[SES](http://docs.aws.amazon.com/ses/latest/APIReference/), +[SWF](http://docs.aws.amazon.com/amazonswf/latest/apireference/), +[AutoScaling](http://docs.aws.amazon.com/AutoScaling/latest/APIReference/), +[Mobile Analytics](http://docs.aws.amazon.com/mobileanalytics/latest/ug/server-reference.html), +[Cognito Identity](http://docs.aws.amazon.com/cognitoidentity/latest/APIReference/), +[Cognito Sync](http://docs.aws.amazon.com/cognitosync/latest/APIReference/), +[Container Service](http://docs.aws.amazon.com/AmazonECS/latest/APIReference/), +[AppStream](http://docs.aws.amazon.com/appstream/latest/developerguide/appstream-api-rest.html), +[Key Management Service](http://docs.aws.amazon.com/kms/latest/APIReference/), +[Config](http://docs.aws.amazon.com/config/latest/APIReference/), +[CloudHSM](http://docs.aws.amazon.com/cloudhsm/latest/dg/api-ref.html), +[Route53](http://docs.aws.amazon.com/Route53/latest/APIReference/requests-rest.html) and +[Route53 Domains](http://docs.aws.amazon.com/Route53/latest/APIReference/requests-rpc.html). 
+ +Indeed, the only AWS services that *don't* support v4 as of 2014-12-30 are +[Import/Export](http://docs.aws.amazon.com/AWSImportExport/latest/DG/api-reference.html) and +[SimpleDB](http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/SDB_API.html) +(they only support [AWS Signature Version 2](https://github.com/mhart/aws2)). + +It also provides defaults for a number of core AWS headers and +request parameters, making it very easy to query AWS services, or +build out a fully-featured AWS library. + +Example +------- + +```javascript +var http = require('http'), + https = require('https'), + aws4 = require('aws4') + +// given an options object you could pass to http.request +var opts = {host: 'sqs.us-east-1.amazonaws.com', path: '/?Action=ListQueues'} + +// alternatively (as aws4 can infer the host): +opts = {service: 'sqs', region: 'us-east-1', path: '/?Action=ListQueues'} + +// alternatively (as us-east-1 is default): +opts = {service: 'sqs', path: '/?Action=ListQueues'} + +aws4.sign(opts) // assumes AWS credentials are available in process.env + +console.log(opts) +/* +{ + host: 'sqs.us-east-1.amazonaws.com', + path: '/?Action=ListQueues', + headers: { + Host: 'sqs.us-east-1.amazonaws.com', + 'X-Amz-Date': '20121226T061030Z', + Authorization: 'AWS4-HMAC-SHA256 Credential=ABCDEF/20121226/us-east-1/sqs/aws4_request, ...' + } +} +*/ + +// we can now use this to query AWS using the standard node.js http API +http.request(opts, function(res) { res.pipe(process.stdout) }).end() +/* + + +... 
+*/ +``` + +More options +------------ + +```javascript +// you can also pass AWS credentials in explicitly (otherwise taken from process.env) +aws4.sign(opts, {accessKeyId: '', secretAccessKey: ''}) + +// can also add the signature to query strings +aws4.sign({service: 's3', path: '/my-bucket?X-Amz-Expires=12345', signQuery: true}) + +// create a utility function to pipe to stdout (with https this time) +function request(o) { https.request(o, function(res) { res.pipe(process.stdout) }).end(o.body || '') } + +// aws4 can infer the HTTP method if a body is passed in +// method will be POST and Content-Type: 'application/x-www-form-urlencoded; charset=utf-8' +request(aws4.sign({service: 'iam', body: 'Action=ListGroups&Version=2010-05-08'})) +/* + +... +*/ + +// can specify any custom option or header as per usual +request(aws4.sign({ + service: 'dynamodb', + region: 'ap-southeast-2', + method: 'POST', + path: '/', + headers: { + 'Content-Type': 'application/x-amz-json-1.0', + 'X-Amz-Target': 'DynamoDB_20120810.ListTables' + }, + body: '{}' +})) +/* +{"TableNames":[]} +... +*/ + +// works with all other services that support Signature Version 4 + +request(aws4.sign({service: 's3', path: '/', signQuery: true})) +/* + +... +*/ + +request(aws4.sign({service: 'ec2', path: '/?Action=DescribeRegions&Version=2014-06-15'})) +/* + +... +*/ + +request(aws4.sign({service: 'sns', path: '/?Action=ListTopics&Version=2010-03-31'})) +/* + +... +*/ + +request(aws4.sign({service: 'sts', path: '/?Action=GetSessionToken&Version=2011-06-15'})) +/* + +... +*/ + +request(aws4.sign({service: 'cloudsearch', path: '/?Action=ListDomainNames&Version=2013-01-01'})) +/* + +... +*/ + +request(aws4.sign({service: 'ses', path: '/?Action=ListIdentities&Version=2010-12-01'})) +/* + +... +*/ + +request(aws4.sign({service: 'autoscaling', path: '/?Action=DescribeAutoScalingInstances&Version=2011-01-01'})) +/* + +... 
+*/ + +request(aws4.sign({service: 'elasticloadbalancing', path: '/?Action=DescribeLoadBalancers&Version=2012-06-01'})) +/* + +... +*/ + +request(aws4.sign({service: 'cloudformation', path: '/?Action=ListStacks&Version=2010-05-15'})) +/* + +... +*/ + +request(aws4.sign({service: 'elasticbeanstalk', path: '/?Action=ListAvailableSolutionStacks&Version=2010-12-01'})) +/* + +... +*/ + +request(aws4.sign({service: 'rds', path: '/?Action=DescribeDBInstances&Version=2012-09-17'})) +/* + +... +*/ + +request(aws4.sign({service: 'monitoring', path: '/?Action=ListMetrics&Version=2010-08-01'})) +/* + +... +*/ + +request(aws4.sign({service: 'redshift', path: '/?Action=DescribeClusters&Version=2012-12-01'})) +/* + +... +*/ + +request(aws4.sign({service: 'cloudfront', path: '/2014-05-31/distribution'})) +/* + +... +*/ + +request(aws4.sign({service: 'elasticache', path: '/?Action=DescribeCacheClusters&Version=2014-07-15'})) +/* + +... +*/ + +request(aws4.sign({service: 'elasticmapreduce', path: '/?Action=DescribeJobFlows&Version=2009-03-31'})) +/* + +... +*/ + +request(aws4.sign({service: 'route53', path: '/2013-04-01/hostedzone'})) +/* + +... +*/ + +request(aws4.sign({service: 'appstream', path: '/applications'})) +/* +{"_links":{"curie":[{"href":"http://docs.aws.amazon.com/appstream/latest/... +... +*/ + +request(aws4.sign({service: 'cognito-sync', path: '/identitypools'})) +/* +{"Count":0,"IdentityPoolUsages":[],"MaxResults":16,"NextToken":null} +... +*/ + +request(aws4.sign({service: 'elastictranscoder', path: '/2012-09-25/pipelines'})) +/* +{"NextPageToken":null,"Pipelines":[]} +... +*/ + +request(aws4.sign({service: 'lambda', path: '/2014-11-13/functions/'})) +/* +{"Functions":[],"NextMarker":null} +... +*/ + +request(aws4.sign({service: 'ecs', path: '/?Action=ListClusters&Version=2014-11-13'})) +/* + +... +*/ + +request(aws4.sign({service: 'glacier', path: '/-/vaults', headers: {'X-Amz-Glacier-Version': '2012-06-01'}})) +/* +{"Marker":null,"VaultList":[]} +... 
+*/ + +request(aws4.sign({service: 'storagegateway', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'StorageGateway_20120630.ListGateways' +}})) +/* +{"Gateways":[]} +... +*/ + +request(aws4.sign({service: 'datapipeline', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'DataPipeline.ListPipelines' +}})) +/* +{"hasMoreResults":false,"pipelineIdList":[]} +... +*/ + +request(aws4.sign({service: 'opsworks', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'OpsWorks_20130218.DescribeStacks' +}})) +/* +{"Stacks":[]} +... +*/ + +request(aws4.sign({service: 'route53domains', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'Route53Domains_v20140515.ListDomains' +}})) +/* +{"Domains":[]} +... +*/ + +request(aws4.sign({service: 'kinesis', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'Kinesis_20131202.ListStreams' +}})) +/* +{"HasMoreStreams":false,"StreamNames":[]} +... +*/ + +request(aws4.sign({service: 'cloudtrail', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'CloudTrail_20131101.DescribeTrails' +}})) +/* +{"trailList":[]} +... +*/ + +request(aws4.sign({service: 'logs', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'Logs_20140328.DescribeLogGroups' +}})) +/* +{"logGroups":[]} +... +*/ + +request(aws4.sign({service: 'codedeploy', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'CodeDeploy_20141006.ListApplications' +}})) +/* +{"applications":[]} +... +*/ + +request(aws4.sign({service: 'directconnect', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'OvertureService.DescribeConnections' +}})) +/* +{"connections":[]} +... 
+*/ + +request(aws4.sign({service: 'kms', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'TrentService.ListKeys' +}})) +/* +{"Keys":[],"Truncated":false} +... +*/ + +request(aws4.sign({service: 'config', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'StarlingDoveService.DescribeDeliveryChannels' +}})) +/* +{"DeliveryChannels":[]} +... +*/ + +request(aws4.sign({service: 'cloudhsm', body: '{}', headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'CloudHsmFrontendService.ListAvailableZones' +}})) +/* +{"AZList":["us-east-1a","us-east-1b","us-east-1c"]} +... +*/ + +request(aws4.sign({ + service: 'swf', + body: '{"registrationStatus":"REGISTERED"}', + headers: { + 'Content-Type': 'application/x-amz-json-1.0', + 'X-Amz-Target': 'SimpleWorkflowService.ListDomains' + } +})) +/* +{"domainInfos":[]} +... +*/ + +request(aws4.sign({ + service: 'cognito-identity', + body: '{"MaxResults": 1}', + headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': 'AWSCognitoIdentityService.ListIdentityPools' + } +})) +/* +{"IdentityPools":[]} +... 
+*/ + +request(aws4.sign({ + service: 'mobileanalytics', + path: '/2014-06-05/events', + body: JSON.stringify({events:[{ + eventType: 'a', + timestamp: new Date().toISOString(), + session: {}, + }]}), + headers: { + 'Content-Type': 'application/json', + 'X-Amz-Client-Context': JSON.stringify({ + client: {client_id: 'a', app_title: 'a'}, + custom: {}, + env: {platform: 'a'}, + services: {}, + }), + } +})) +/* +(HTTP 202, empty response) +*/ + +// Generate CodeCommit Git access password +var signer = new aws4.RequestSigner({ + service: 'codecommit', + host: 'git-codecommit.us-east-1.amazonaws.com', + method: 'GIT', + path: '/v1/repos/MyAwesomeRepo', +}) +var password = signer.getDateTime() + 'Z' + signer.signature() +``` + +API +--- + +### aws4.sign(requestOptions, [credentials]) + +This calculates and populates the `Authorization` header of +`requestOptions`, and any other necessary AWS headers and/or request +options. Returns `requestOptions` as a convenience for chaining. + +`requestOptions` is an object holding the same options that the node.js +[http.request](http://nodejs.org/docs/latest/api/http.html#http_http_request_options_callback) +function takes. 
+ +The following properties of `requestOptions` are used in the signing or +populated if they don't already exist: + +- `hostname` or `host` (will be determined from `service` and `region` if not given) +- `method` (will use `'GET'` if not given or `'POST'` if there is a `body`) +- `path` (will use `'/'` if not given) +- `body` (will use `''` if not given) +- `service` (will be calculated from `hostname` or `host` if not given) +- `region` (will be calculated from `hostname` or `host` or use `'us-east-1'` if not given) +- `headers['Host']` (will use `hostname` or `host` or be calculated if not given) +- `headers['Content-Type']` (will use `'application/x-www-form-urlencoded; charset=utf-8'` + if not given and there is a `body`) +- `headers['Date']` (used to calculate the signature date if given, otherwise `new Date` is used) + +Your AWS credentials (which can be found in your +[AWS console](https://portal.aws.amazon.com/gp/aws/securityCredentials)) +can be specified in one of two ways: + +- As the second argument, like this: + +```javascript +aws4.sign(requestOptions, { + secretAccessKey: "", + accessKeyId: "", + sessionToken: "" +}) +``` + +- From `process.env`, such as this: + +``` +export AWS_SECRET_ACCESS_KEY="" +export AWS_ACCESS_KEY_ID="" +export AWS_SESSION_TOKEN="" +``` + +(will also use `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` if available) + +The `sessionToken` property and `AWS_SESSION_TOKEN` environment variable are optional for signing +with [IAM STS temporary credentials](http://docs.aws.amazon.com/STS/latest/UsingSTS/using-temp-creds.html). + +Installation +------------ + +With [npm](http://npmjs.org/) do: + +``` +npm install aws4 +``` + +Can also be used [in the browser](./browser). + +Thanks +------ + +Thanks to [@jed](https://github.com/jed) for his +[dynamo-client](https://github.com/jed/dynamo-client) lib where I first +committed and subsequently extracted this code. 
+ +Also thanks to the +[official node.js AWS SDK](https://github.com/aws/aws-sdk-js) for giving +me a start on implementing the v4 signature. + diff --git a/src/node_modules/aws4/aws4.js b/src/node_modules/aws4/aws4.js new file mode 100644 index 0000000..124cd7a --- /dev/null +++ b/src/node_modules/aws4/aws4.js @@ -0,0 +1,332 @@ +var aws4 = exports, + url = require('url'), + querystring = require('querystring'), + crypto = require('crypto'), + lru = require('./lru'), + credentialsCache = lru(1000) + +// http://docs.amazonwebservices.com/general/latest/gr/signature-version-4.html + +function hmac(key, string, encoding) { + return crypto.createHmac('sha256', key).update(string, 'utf8').digest(encoding) +} + +function hash(string, encoding) { + return crypto.createHash('sha256').update(string, 'utf8').digest(encoding) +} + +// This function assumes the string has already been percent encoded +function encodeRfc3986(urlEncodedString) { + return urlEncodedString.replace(/[!'()*]/g, function(c) { + return '%' + c.charCodeAt(0).toString(16).toUpperCase() + }) +} + +// request: { path | body, [host], [method], [headers], [service], [region] } +// credentials: { accessKeyId, secretAccessKey, [sessionToken] } +function RequestSigner(request, credentials) { + + if (typeof request === 'string') request = url.parse(request) + + var headers = request.headers = (request.headers || {}), + hostParts = this.matchHost(request.hostname || request.host || headers.Host || headers.host) + + this.request = request + this.credentials = credentials || this.defaultCredentials() + + this.service = request.service || hostParts[0] || '' + this.region = request.region || hostParts[1] || 'us-east-1' + + // SES uses a different domain from the service name + if (this.service === 'email') this.service = 'ses' + + if (!request.method && request.body) + request.method = 'POST' + + if (!headers.Host && !headers.host) { + headers.Host = request.hostname || request.host || this.createHost() + + // If a 
port is specified explicitly, use it as is + if (request.port) + headers.Host += ':' + request.port + } + if (!request.hostname && !request.host) + request.hostname = headers.Host || headers.host + + this.isCodeCommitGit = this.service === 'codecommit' && request.method === 'GIT' +} + +RequestSigner.prototype.matchHost = function(host) { + var match = (host || '').match(/([^\.]+)\.(?:([^\.]*)\.)?amazonaws\.com(\.cn)?$/) + var hostParts = (match || []).slice(1, 3) + + // ES's hostParts are sometimes the other way round, if the value that is expected + // to be region equals ‘es’ switch them back + // e.g. search-cluster-name-aaaa00aaaa0aaa0aaaaaaa0aaa.us-east-1.es.amazonaws.com + if (hostParts[1] === 'es') + hostParts = hostParts.reverse() + + return hostParts +} + +// http://docs.aws.amazon.com/general/latest/gr/rande.html +RequestSigner.prototype.isSingleRegion = function() { + // Special case for S3 and SimpleDB in us-east-1 + if (['s3', 'sdb'].indexOf(this.service) >= 0 && this.region === 'us-east-1') return true + + return ['cloudfront', 'ls', 'route53', 'iam', 'importexport', 'sts'] + .indexOf(this.service) >= 0 +} + +RequestSigner.prototype.createHost = function() { + var region = this.isSingleRegion() ? '' : + (this.service === 's3' && this.region !== 'us-east-1' ? '-' : '.') + this.region, + service = this.service === 'ses' ? 
'email' : this.service + return service + region + '.amazonaws.com' +} + +RequestSigner.prototype.prepareRequest = function() { + this.parsePath() + + var request = this.request, headers = request.headers, query + + if (request.signQuery) { + + this.parsedPath.query = query = this.parsedPath.query || {} + + if (this.credentials.sessionToken) + query['X-Amz-Security-Token'] = this.credentials.sessionToken + + if (this.service === 's3' && !query['X-Amz-Expires']) + query['X-Amz-Expires'] = 86400 + + if (query['X-Amz-Date']) + this.datetime = query['X-Amz-Date'] + else + query['X-Amz-Date'] = this.getDateTime() + + query['X-Amz-Algorithm'] = 'AWS4-HMAC-SHA256' + query['X-Amz-Credential'] = this.credentials.accessKeyId + '/' + this.credentialString() + query['X-Amz-SignedHeaders'] = this.signedHeaders() + + } else { + + if (!request.doNotModifyHeaders && !this.isCodeCommitGit) { + if (request.body && !headers['Content-Type'] && !headers['content-type']) + headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8' + + if (request.body && !headers['Content-Length'] && !headers['content-length']) + headers['Content-Length'] = Buffer.byteLength(request.body) + + if (this.credentials.sessionToken && !headers['X-Amz-Security-Token'] && !headers['x-amz-security-token']) + headers['X-Amz-Security-Token'] = this.credentials.sessionToken + + if (this.service === 's3' && !headers['X-Amz-Content-Sha256'] && !headers['x-amz-content-sha256']) + headers['X-Amz-Content-Sha256'] = hash(this.request.body || '', 'hex') + + if (headers['X-Amz-Date'] || headers['x-amz-date']) + this.datetime = headers['X-Amz-Date'] || headers['x-amz-date'] + else + headers['X-Amz-Date'] = this.getDateTime() + } + + delete headers.Authorization + delete headers.authorization + } +} + +RequestSigner.prototype.sign = function() { + if (!this.parsedPath) this.prepareRequest() + + if (this.request.signQuery) { + this.parsedPath.query['X-Amz-Signature'] = this.signature() + } else { + 
this.request.headers.Authorization = this.authHeader() + } + + this.request.path = this.formatPath() + + return this.request +} + +RequestSigner.prototype.getDateTime = function() { + if (!this.datetime) { + var headers = this.request.headers, + date = new Date(headers.Date || headers.date || new Date) + + this.datetime = date.toISOString().replace(/[:\-]|\.\d{3}/g, '') + + // Remove the trailing 'Z' on the timestamp string for CodeCommit git access + if (this.isCodeCommitGit) this.datetime = this.datetime.slice(0, -1) + } + return this.datetime +} + +RequestSigner.prototype.getDate = function() { + return this.getDateTime().substr(0, 8) +} + +RequestSigner.prototype.authHeader = function() { + return [ + 'AWS4-HMAC-SHA256 Credential=' + this.credentials.accessKeyId + '/' + this.credentialString(), + 'SignedHeaders=' + this.signedHeaders(), + 'Signature=' + this.signature(), + ].join(', ') +} + +RequestSigner.prototype.signature = function() { + var date = this.getDate(), + cacheKey = [this.credentials.secretAccessKey, date, this.region, this.service].join(), + kDate, kRegion, kService, kCredentials = credentialsCache.get(cacheKey) + if (!kCredentials) { + kDate = hmac('AWS4' + this.credentials.secretAccessKey, date) + kRegion = hmac(kDate, this.region) + kService = hmac(kRegion, this.service) + kCredentials = hmac(kService, 'aws4_request') + credentialsCache.set(cacheKey, kCredentials) + } + return hmac(kCredentials, this.stringToSign(), 'hex') +} + +RequestSigner.prototype.stringToSign = function() { + return [ + 'AWS4-HMAC-SHA256', + this.getDateTime(), + this.credentialString(), + hash(this.canonicalString(), 'hex'), + ].join('\n') +} + +RequestSigner.prototype.canonicalString = function() { + if (!this.parsedPath) this.prepareRequest() + + var pathStr = this.parsedPath.path, + query = this.parsedPath.query, + headers = this.request.headers, + queryStr = '', + normalizePath = this.service !== 's3', + decodePath = this.service === 's3' || 
this.request.doNotEncodePath, + decodeSlashesInPath = this.service === 's3', + firstValOnly = this.service === 's3', + bodyHash + + if (this.service === 's3' && this.request.signQuery) { + bodyHash = 'UNSIGNED-PAYLOAD' + } else if (this.isCodeCommitGit) { + bodyHash = '' + } else { + bodyHash = headers['X-Amz-Content-Sha256'] || headers['x-amz-content-sha256'] || + hash(this.request.body || '', 'hex') + } + + if (query) { + queryStr = encodeRfc3986(querystring.stringify(Object.keys(query).sort().reduce(function(obj, key) { + if (!key) return obj + obj[key] = !Array.isArray(query[key]) ? query[key] : + (firstValOnly ? query[key][0] : query[key].slice().sort()) + return obj + }, {}))) + } + if (pathStr !== '/') { + if (normalizePath) pathStr = pathStr.replace(/\/{2,}/g, '/') + pathStr = pathStr.split('/').reduce(function(path, piece) { + if (normalizePath && piece === '..') { + path.pop() + } else if (!normalizePath || piece !== '.') { + if (decodePath) piece = decodeURIComponent(piece) + path.push(encodeRfc3986(encodeURIComponent(piece))) + } + return path + }, []).join('/') + if (pathStr[0] !== '/') pathStr = '/' + pathStr + if (decodeSlashesInPath) pathStr = pathStr.replace(/%2F/g, '/') + } + + return [ + this.request.method || 'GET', + pathStr, + queryStr, + this.canonicalHeaders() + '\n', + this.signedHeaders(), + bodyHash, + ].join('\n') +} + +RequestSigner.prototype.canonicalHeaders = function() { + var headers = this.request.headers + function trimAll(header) { + return header.toString().trim().replace(/\s+/g, ' ') + } + return Object.keys(headers) + .sort(function(a, b) { return a.toLowerCase() < b.toLowerCase() ? 
-1 : 1 }) + .map(function(key) { return key.toLowerCase() + ':' + trimAll(headers[key]) }) + .join('\n') +} + +RequestSigner.prototype.signedHeaders = function() { + return Object.keys(this.request.headers) + .map(function(key) { return key.toLowerCase() }) + .sort() + .join(';') +} + +RequestSigner.prototype.credentialString = function() { + return [ + this.getDate(), + this.region, + this.service, + 'aws4_request', + ].join('/') +} + +RequestSigner.prototype.defaultCredentials = function() { + var env = process.env + return { + accessKeyId: env.AWS_ACCESS_KEY_ID || env.AWS_ACCESS_KEY, + secretAccessKey: env.AWS_SECRET_ACCESS_KEY || env.AWS_SECRET_KEY, + sessionToken: env.AWS_SESSION_TOKEN, + } +} + +RequestSigner.prototype.parsePath = function() { + var path = this.request.path || '/', + queryIx = path.indexOf('?'), + query = null + + if (queryIx >= 0) { + query = querystring.parse(path.slice(queryIx + 1)) + path = path.slice(0, queryIx) + } + + // S3 doesn't always encode characters > 127 correctly and + // all services don't encode characters > 255 correctly + // So if there are non-reserved chars (and it's not already all % encoded), just encode them all + if (/[^0-9A-Za-z!'()*\-._~%/]/.test(path)) { + path = path.split('/').map(function(piece) { + return encodeURIComponent(decodeURIComponent(piece)) + }).join('/') + } + + this.parsedPath = { + path: path, + query: query, + } +} + +RequestSigner.prototype.formatPath = function() { + var path = this.parsedPath.path, + query = this.parsedPath.query + + if (!query) return path + + // Services don't support empty query string keys + if (query[''] != null) delete query[''] + + return path + '?' 
+ encodeRfc3986(querystring.stringify(query)) +} + +aws4.RequestSigner = RequestSigner + +aws4.sign = function(request, credentials) { + return new RequestSigner(request, credentials).sign() +} diff --git a/src/node_modules/aws4/lru.js b/src/node_modules/aws4/lru.js new file mode 100644 index 0000000..333f66a --- /dev/null +++ b/src/node_modules/aws4/lru.js @@ -0,0 +1,96 @@ +module.exports = function(size) { + return new LruCache(size) +} + +function LruCache(size) { + this.capacity = size | 0 + this.map = Object.create(null) + this.list = new DoublyLinkedList() +} + +LruCache.prototype.get = function(key) { + var node = this.map[key] + if (node == null) return undefined + this.used(node) + return node.val +} + +LruCache.prototype.set = function(key, val) { + var node = this.map[key] + if (node != null) { + node.val = val + } else { + if (!this.capacity) this.prune() + if (!this.capacity) return false + node = new DoublyLinkedNode(key, val) + this.map[key] = node + this.capacity-- + } + this.used(node) + return true +} + +LruCache.prototype.used = function(node) { + this.list.moveToFront(node) +} + +LruCache.prototype.prune = function() { + var node = this.list.pop() + if (node != null) { + delete this.map[node.key] + this.capacity++ + } +} + + +function DoublyLinkedList() { + this.firstNode = null + this.lastNode = null +} + +DoublyLinkedList.prototype.moveToFront = function(node) { + if (this.firstNode == node) return + + this.remove(node) + + if (this.firstNode == null) { + this.firstNode = node + this.lastNode = node + node.prev = null + node.next = null + } else { + node.prev = null + node.next = this.firstNode + node.next.prev = node + this.firstNode = node + } +} + +DoublyLinkedList.prototype.pop = function() { + var lastNode = this.lastNode + if (lastNode != null) { + this.remove(lastNode) + } + return lastNode +} + +DoublyLinkedList.prototype.remove = function(node) { + if (this.firstNode == node) { + this.firstNode = node.next + } else if (node.prev != 
null) { + node.prev.next = node.next + } + if (this.lastNode == node) { + this.lastNode = node.prev + } else if (node.next != null) { + node.next.prev = node.prev + } +} + + +function DoublyLinkedNode(key, val) { + this.key = key + this.val = val + this.prev = null + this.next = null +} diff --git a/src/node_modules/aws4/package.json b/src/node_modules/aws4/package.json new file mode 100644 index 0000000..0eba852 --- /dev/null +++ b/src/node_modules/aws4/package.json @@ -0,0 +1,107 @@ +{ + "_args": [ + [ + "aws4@1.8.0", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "aws4@1.8.0", + "_id": "aws4@1.8.0", + "_inBundle": false, + "_integrity": "sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==", + "_location": "/aws4", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "aws4@1.8.0", + "name": "aws4", + "escapedName": "aws4", + "rawSpec": "1.8.0", + "saveSpec": null, + "fetchSpec": "1.8.0" + }, + "_requiredBy": [ + "/request" + ], + "_resolved": "https://registry.npmjs.org/aws4/-/aws4-1.8.0.tgz", + "_spec": "1.8.0", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Michael Hart", + "email": "michael.hart.au@gmail.com", + "url": "http://github.com/mhart" + }, + "bugs": { + "url": "https://github.com/mhart/aws4/issues" + }, + "description": "Signs and prepares requests using AWS Signature Version 4", + "devDependencies": { + "mocha": "^2.4.5", + "should": "^8.2.2" + }, + "homepage": "https://github.com/mhart/aws4#readme", + "keywords": [ + "amazon", + "aws", + "signature", + "s3", + "ec2", + "autoscaling", + "cloudformation", + "elasticloadbalancing", + "elb", + "elasticbeanstalk", + "cloudsearch", + "dynamodb", + "kinesis", + "lambda", + "glacier", + "sqs", + "sns", + "iam", + "sts", + "ses", + "swf", + "storagegateway", + 
"datapipeline", + "directconnect", + "redshift", + "opsworks", + "rds", + "monitoring", + "cloudtrail", + "cloudfront", + "codedeploy", + "elasticache", + "elasticmapreduce", + "elastictranscoder", + "emr", + "cloudwatch", + "mobileanalytics", + "cognitoidentity", + "cognitosync", + "cognito", + "containerservice", + "ecs", + "appstream", + "keymanagementservice", + "kms", + "config", + "cloudhsm", + "route53", + "route53domains", + "logs" + ], + "license": "MIT", + "main": "aws4.js", + "name": "aws4", + "repository": { + "type": "git", + "url": "git+https://github.com/mhart/aws4.git" + }, + "scripts": { + "test": "mocha ./test/fast.js ./test/slow.js -b -t 100s -R list" + }, + "version": "1.8.0" +} diff --git a/src/node_modules/azure-storage/.jshintignore b/src/node_modules/azure-storage/.jshintignore new file mode 100644 index 0000000..776bc01 --- /dev/null +++ b/src/node_modules/azure-storage/.jshintignore @@ -0,0 +1,3 @@ +node_modules/* +packages +.git \ No newline at end of file diff --git a/src/node_modules/azure-storage/.jshintrc b/src/node_modules/azure-storage/.jshintrc new file mode 100644 index 0000000..77840b6 --- /dev/null +++ b/src/node_modules/azure-storage/.jshintrc @@ -0,0 +1,32 @@ +{ + "bitwise": true, + "camelcase": true, + "curly": false, + "eqeqeq": false, + "forin": true, + "immed": true, + "indent": 2, + "latedef": true, + "maxparams": false, + "maxdepth": false, + "maxstatements": false, + "maxcomplexity": false, + "newcap": true, + "noarg": true, + "node": true, + "noempty": true, + "nonew": true, + "plusplus": false, + "quotmark": "single", + "regexp": true, + "sub": true, + "strict": false, + "trailing": true, + "undef": true, + "unused": true, + "shadow": true, + "globals": { + "FileReader": true, + "window": true + } +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/.travis.yml b/src/node_modules/azure-storage/.travis.yml new file mode 100644 index 0000000..b0849c8 --- /dev/null +++ 
b/src/node_modules/azure-storage/.travis.yml @@ -0,0 +1,19 @@ +language: node_js +node_js: + - "6" + - "8" + - "10" + - "11" + +after_script: + - npm run coveralls + +install: + - npm --version + - npm install + +allow_failures: + - node_js: "5" + - node_js: "7" + +sudo: false \ No newline at end of file diff --git a/src/node_modules/azure-storage/BreakingChanges.md b/src/node_modules/azure-storage/BreakingChanges.md new file mode 100644 index 0000000..ac1ca74 --- /dev/null +++ b/src/node_modules/azure-storage/BreakingChanges.md @@ -0,0 +1,104 @@ +Tracking Breaking Changes in 2.0.0 + +ALL +* Fixed the issue that retry filter will continuously retry for client error like `ETIMEDOUT`. + +BLOB +* When specifying access condition `If-None-Match: *` for read, it will always fail. + +QUEUE +* `createMessage` callback has been changed from `errorOrResponse` to `errorOrResult` which contains `messageId`, `popReceipt`, `timeNextVisible`, `insertionTime` and `expirationTime`. It can be passed to `updateMessage` and `deleteMessage` APIs. + +Tracking Breaking Changes in 1.4.0 + +BLOB +* Changed `/S` of SpeedSummary to `/s`. + +FILE +* Changed `/S` of SpeedSummary to `/s`. + +Tracking Breaking Changes in 1.3.0 + +QUEUE +* Updated the `QueueMessageResult.dequeueCount` from `string` to `number`. + +Tracking Breaking Changes in 1.2.0 + +TABLE +* Beginning with version 2015-12-11, the Atom feed is no longer supported as a payload format for Table service operations. Version 2015-12-11 and later versions support only JSON for the payload format. + +Tracking Breaking Changes in 1.0.0 + +BLOB +* The `blob` property of BlobResult has been renamed to `name` to keep consistent with other services API and the `listBlob` API. +* Decoded the block name of LockListResult from base64 string to utf-8 string. 
+ +QUEUE +* The `encodeMessage` flag of QueueService has been replaced by `messageEncoder` which support `TextBase64QueueMessageEncoder`, `BinaryBase64QueueMessageEncoder`, `TextXmlQueueMessageEncoder` and custom implementation of QueueMessageEncoder. + +Tracking Breaking Changes in 0.10.0 + +ALL +* The `signedIdentifiers` parameter and result properties have been changed from array to hash map to avoid non unique signed identifier id. + +BLOB +* The `contentType`, `contentEncoding`, `contentLanguage`, `contentDisposition`, 'contentMD5' and `cacheControl` parameters and return values about the blob's content settings are grouped into a `contentSettings` sub property. +* The `contentMD5` parameter to verify the integrity of the data during the transport is changed to `transactionalContentMD5` +* The `copy*` return values are grouped into a `copy` sub property. +* The `lease*` return values are grouped into a `lease` sub property. +* The options.accessConditions parameter is changed to AccessConditions type. + +QUEUE +* Renamed QueueResult.approximatemessagecount to camelCase and change its type to 'number'. +* Renamed the property names of QueueMessageResult to camelCase +* Renamed the parameter options.messagettl of the createMessage API to options.messageTimeToLive. +* Updated the callback of the createMessage API from errorOrResult to errorOrResponse. +* Removed peekOnly option from the options parameter of the getMessages API. To peek messages, use peekMessages instead. + +FILE +* The `contentType`, `contentEncoding`, `contentLanguage`, `contentDisposition`, 'contentMD5' and `cacheControl` parameters and return values about the blob's content settings are grouped into a `contentSettings` sub property. +* The `contentMD5` parameter to verify the integrity of the data during the transport is changed to `transactionalContentMD5` +* The `copy*` return values are grouped into a `copy` sub property. 
+* The options.accessConditions parameter is changed to AccessConditions type. + +TABLE +* Renamed the function updateEntity to replaceEntity. +* Renamed TableUtilities.entityGenerator.Entity to EntityProperty. + +Tracking Breaking Changes in 0.7.0 + +ALL +* The generateDevelopmentStorageCredendentials function in the azure-storage.js is renamed to generateDevelopmentStorageCredentials. + +BLOB +* The AppendFromLocalFile function in the blobservice.js is renamed to appendFromLocalFile. +* The AppendFromStream function in the blobservice.js is renamed to appendFromStream. +* The AppendFromText function in the blobservice.js is renamed to appendFromText. +* The properties in the properties object of BlobResult and ContainerResult when listing blobs or containers are moved to the result object. +* The property names returned from listing blobs or containers are changed to camelCase. +* The blob result is added to the result of BlobService.commitBlocks and the blob list information is embedded in it. + +FILE +* The properties in the properties object of FileResult and ShareResult when listing files or shares are moved to the result object. +* The property names returned from listing files or shares are changed to camelCase. +* The property names returned from getting share stats are changed to camelCase. + +Tracking Breaking Changes in 0.5.0 + +ALL +* The suffix "_HEADER" is removed from all the http header constants. +* The generateSharedAccessSignatureWithVersion function in each service is deprecated. +* The shouldRetry function in the retry policy filters takes a "requestOption" object instead of a "retryData" object. + +BLOB +* The "publicAccessLevel" parameter in the BlobService.setContainerACL function is moved into the "options" parameter. +* The properties in the BlobService.setBlobProperties function are moved from the "options" to the "properties" parameter. 
+* The "AccessPolicy.Permission" is renamed to "AccessPolicy.Permissions" on the result object of the BlobService.getContainerAcl function. + +TABLE +* The "signedIdentifiers" parameter in the TableService.SetTableACL function is moved out from the "options" parameter. +* The "AccessPolicy.Permission" is renamed to "AccessPolicy.Permissions" on the result object of the TableService.getTableAcl function. + +QUEUE +* The option "options.messagetext" is renamed to "options.messageText" in the QueueService.UpdateMessage function. +* The "AccessPolicy.Permission" is renamed to "AccessPolicy.Permissions" on the result object of the QueueService.getQueueAcl function. diff --git a/src/node_modules/azure-storage/CONTRIBUTING.md b/src/node_modules/azure-storage/CONTRIBUTING.md new file mode 100644 index 0000000..fa769e7 --- /dev/null +++ b/src/node_modules/azure-storage/CONTRIBUTING.md @@ -0,0 +1,83 @@ +# Contribute Code or Provide Feedback + +If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](https://azure.github.io/guidelines/). + +Look at issues in the repository labeled 'good first issue' to choose what you would like to jump into! + +## Project Setup +The Azure Storage development team uses Visual Studio Code so instructions will be tailored to that preference. However, any preferred IDE or other toolset should be usable. + +### Install +* Node v4 or above +* [Visual Studio Code](https://code.visualstudio.com/) + +### Development Environment Setup +To get the source code of the SDK via **git** just type: + +```bash +git clone https://github.com/Azure/azure-storage-node.git +cd ./azure-storage-node +``` + +Then, run NPM to install all the NPM dependencies: + +```bash +npm install +``` + +## Tests + +### Running +Unit tests don't require real credentials and don't require any environment variables to be set. 
By default the unit tests are run with Nock recording data. + +If you would like to run the unit tests against a live storage account, you will need to set up environment variables which will be used. These tests will use these credentials to run live tests against Azure with the provided credentials. Note that you will be charged for storage usage. You need to verify the clean up script did its job at the end of a test run. + +Unit tests can then be run from root directory using: + +```bash +npm test +``` + +To run unit tests against live storage accounts, please set environment variable to turn off Nock by: + +```bash +export NOCK_OFF=true +``` + +and set up the following environment variables for storage account credentials by: + +```bash +export AZURE_STORAGE_CONNECTION_STRING="valid storage connection string" +export AZURE_STORAGE_CONNECTION_STRING_PREMIUM_ACCOUNT="optional valid storage connection string for premium storage account" +export AZURE_STORAGE_CONNECTION_STRING_SSE_ENABLED_ACCOUNT="optional valid storage connection string for storage account with storage service encryption enabled" +``` + +Note: `AZURE_STORAGE_CONNECTION_STRING_PREMIUM_ACCOUNT` and `AZURE_STORAGE_CONNECTION_STRING_SSE_ENABLED_ACCOUNT` are optional settings to enable testing suites related to premium storage account and storage service encryption, and only needed to be set when you are developing related features. + +### Testing Features +As you develop a feature, you'll need to write tests to ensure quality. Your changes should be covered by unit tests. You should also run existing tests related to your change to address any unexpected breaks. + +## Pull Requests + +### Guidelines +The following are the minimum requirements for any pull request that must be met before contributions can be accepted. +* Make sure you've signed the [CLA](https://cla.azure.com/) before you start working on any change. 
+* Discuss any proposed contribution with the team via a GitHub issue **before** starting development. +* Code must be professional quality + * No style issues + * You should strive to mimic the style with which we have written the library + * Clean, well-commented, well-designed code + * Try to limit the number of commits for a feature to 1-2. If you end up having too many we may ask you to squash your changes into fewer commits. +* [ChangeLog.md](ChangeLog.md) needs to be updated describing the new change +* Thoroughly test your feature + +### Branching Policy +Changes should be based on the **dev** branch, not master as master is considered publicly released code. Each breaking change should be recorded in [BreakingChanges.md](BreakingChanges.md). + +### Adding Features for All Platforms +We strive to release each new feature for each of our environments at the same time. Therefore, we ask that all contributions be written for Node v4 and later. + +### Review Process +We expect all guidelines to be met before accepting a pull request. As such, we will work with you to address issues we find by leaving comments in your code. Please understand that it may take a few iterations before the code is accepted as we maintain high standards on code quality. Once we feel comfortable with a contribution, we will validate the change and accept the pull request. + +Thank you for any contributions! Please let the team know if you have any questions or concerns about our contribution policy. diff --git a/src/node_modules/azure-storage/CONTRIBUTORS.txt b/src/node_modules/azure-storage/CONTRIBUTORS.txt new file mode 100644 index 0000000..db0169a --- /dev/null +++ b/src/node_modules/azure-storage/CONTRIBUTORS.txt @@ -0,0 +1,7 @@ +Contributors should submit an update to this file with a commit in order to receive recognition. Thank you for your contributions. 
+ +List of Contributors +==================== + +Microsoft Corporation +Microsoft Open Technologies, Inc \ No newline at end of file diff --git a/src/node_modules/azure-storage/ChangeLog.md b/src/node_modules/azure-storage/ChangeLog.md new file mode 100644 index 0000000..0efb549 --- /dev/null +++ b/src/node_modules/azure-storage/ChangeLog.md @@ -0,0 +1,846 @@ +Note: This is an Azure Storage only package. The all up Azure node sdk still has the old storage bits in there. In a future release, those storage bits will be removed and an npm dependency to this storage node sdk will +be taken. This is a GA release and the changes described below indicate the changes from the Azure node SDK 0.9.8 available here - https://github.com/Azure/azure-sdk-for-node. + +2019.04 Version 2.10.3 + +* Fixed callback not being called in _getBlobToLocalFile. +* Removed retryInfo.retryable check in retrypolicyfilter.js. +* Removed comment about maxResults. +* Fixed Travis-CI failed validation. +* Updated latest links and descriptions to V10 SDK in readme.md. +* Fixed some errors are thrown in a inner async callback which cannot be caught. + +2018.10 Version 2.10.2 + +ALL +* Upgrade `xmlbuilder` to 9.0.7 and `extend` to 3.0.2 to avoid vulnerabilities. +* Removed deprecated Buffer constructor calls in favor of static methods `Buffer.from` and `Buffer.alloc`. +* Added JSv10 link and docs.microsoft.com link. +* Improved documents. + +BLOB +* Added typescript declarations to `listBlobDirectoriesSegmented` and `listBlobDirectoriesSegmentedWithPrefix`. + +FILE +* Fixed an issue that empty text isn’t supported in `createFileFromText`. + +TABLE +* Fixed an issue that uncaught TypeError could be thrown from `createTable` when request is not sent properly. + +2018.08 Version 2.10.1 + +ALL +* Added a parameter `enableGlobalHttpAgent` to all services. To enable global HTTP(s) agent, please set `{blob|queue|table|file}Service.enableGlobalHttpAgent` to true. 
+* Fixed a bug that content type value is incorrect for json. + +2018.06 Version 2.10.0 + +ALL +* Updated storage service version to 2018-03-28. + +BLOB +* Fixed a bug that `DeleteRetentionPolicy.Days` should be `number` instead of `string` when calling `getServiceProperties`. +* Added a method `getAccountProperties` to `blobService`. +* Added a method `createBlockFromURL` to `blobService`. +* Added support for static website service properties (in preview). + +2018.05 Version 2.9.0-preview + +ALL +* Updated storage service version to 2017-11-09. +* Added `progress` event for `SpeedSummary` class, which will be triggered when every progress updates. +* Bumped version of request module from ~2.83.0 to ^2.86.0 to solve a vulnerability issue. + +BLOB +* Added `createBlobServiceWithTokenCredential()` to create `BlobService` object with bearer tokens such as OAuth access token (in preview). +* Added support for '$web' as a valid blob container name for static website. +* Added support for write-once read-many containers (in preview). +* The `Get Container Properties` and `List Containers` APIs now return two new properties indicating whether the container has an immutability policy or a legal hold. +* The `Get Blob Properties` and `List Blobs` APIs now return the creation time of the blob as a property. + +QUEUE +* Added `createQueueServiceWithTokenCredential()` to create `QueueService` object with bearer tokens such as OAuth access token (in preview). + +2018.05 Version 2.8.3 + +ALL +* Bumped version of request module from ~2.83.0 to ^2.86.0 to solve a vulnerability issue. + +2018.04 Version 2.8.2 + +ALL +* Improved JSDoc to clarify Node.js `Writable` and `Readable` stream. +* Updated CONTRIBUTION.md about the Node.js supported versions. + +BLOB +* Fixed a TypeScript definition error that `blobService.getUrl()` misses `snapshotId` parameter. +* Fixed a bug that `Speedsummary` doesn't get updated when downloading file/blob under 32MB. 
+ +FILE +* Fixed a README.MD mistake for `FileService` example. + +2018.03 Version 2.8.1 + +ALL +* Updated request and validator package dependencies to reduce vulnerability. +* Fix a type assignment bug in tests for env variables. +* Improved documents. + +2018.02 Version 2.8.0 + +ALL +* Updated storage service version to 2017-07-29. + +BLOB +* Added support for Soft Delete feature. +* Fixed several blobuploader example issues. +* Fixed a compatibility issue for `getBlobToLocalFile` and `createReadStream` with Node.js v9. +* Fixed a bug that blob name will be wrongly trimmed to empty string when listing blobs. +* Fixed a bug when blob size > 32M, GetBlobStream returns root blob data instead of snapshot data. + +FILE +* Fixed a compatibility issue for `getFileToLocalFile` and `createReadStream` with Node.js v9. + +2017.12 Version 2.7.0 + +ALL +* Default HTTP connection will enable keep-alive to improve performance. + +BLOB +* Added support for `getBlobProperties`, `listBlobsSegmented` and `listBlobsSegmentedWithPrefix` to return `AccessTierChangeTime` and `AccessTierInferred` properties. +* Fixed a blob lease support issue for `appendFromText` and `resizePageBlob`. + +TABLE +* Fixed an apostrophe missing issue in `TableQuery.where()` example. + +2017.10 Version 2.6.0 + +FILE + +* Added support for file share snapshot. + +2017.09 Version 2.5.0 + +ALL +* Optimized samples and documentation for retry policies. +* Added additional samples for blob and file. + +BLOB +* Optimized `commitBlocks` API implementation and documentation. + +FILE +* Added support for File metrics. + +2017.08 Version 2.4.0 + +ALL +* Fixed a TypeScript issue that `browserFile` should be `Object` type in the TypeScript definition file. + +BLOB +* Added support for getting and setting a tier for a block blob under a LRS Blob Storage Account from tiers hot, cool and archive. + +2017.08 Version 2.3.0 + +ALL +* Updated storage service version to 2017-04-17. 
For more information, please see - https://docs.microsoft.com/en-us/rest/api/storageservices/versioning-for-the-azure-storage-services +* Updated the dependency of the 'request' module to avoid security vulnerability: (https://snyk.io/test/npm/azure-storage). +* Added `requestServerEncrypted` property to `ServiceResponse` which indicates if the contents of the request have been successfully encrypted. +* Improved API documentation. + +BLOB +* PageBlobs: For Premium Accounts only, added support for getting and setting the tier on a page blob. The tier can also be set when creating or copying from an existing page blob. + +FILE +* Added `serverEncryped` property to `FileResult` and `DirectoryResult` which indicates if the file data and application metadata are completely encrypted using the specified algorithm on the server. +* Fixed a TypeScript issue that SharedAccessPermissions for FileUtilities is missing in type definition file. + +TABLE +* Fixed a typo in table query example for combineFilters function. + +2017.08 Version 2.2.2 + +ALL + +* Fixed a retry timeout issue during uploading. + +2017.07 Version 2.2.1 + +BLOB + +* Optimized memory usage especially for uploading blobs with large block size. + +2017.06 Version 2.2.0 + +ALL +* Fixed a TypeScript issue that callback of `createWriteStreamToBlockBlob` should be optional in the TypeScript definition file. +* Fixed a bug in TypeScript definition file about `AccessConditions` mismatches with `AccessCondition` which is exported in JavaScript. +* Fixed an undefined property issue in `BlobResult` object of the sample code. +* Updated node-uuid to uuid. +* Updated underscore version to 1.8.3. +* Updated validator version to 3.35.0. + +BLOB +* Added a `defaultEnableReuseSocket` option for `BlobService` to control reuseSocket settings. +* Fixed a hanging or silent failing issue for blob uploading under some situations. +* Fixed a bug that `doesBlobExist` does not support `snapshotId` parameter. 
+* Fixed a bug in `getBlobToLocalFile` that `fs` will throw exceptions instead of returning errors in callback. + +FILE +* Added a `defaultEnableReuseSocket` option for `FileService` to control reuseSocket settings. +* Fixed a hanging or silent failing issue for file uploading under some situations. +* Fixed a bug in `getFileToLocalFile` that `fs` will throw exceptions instead of returning errors in callback. + +2017.03 Version 2.1.0 + +ALL +* Fixed the type script issue that AccessConditions is missing in the type definition file. + +BLOB +* Added support for page blob incremental copy. Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob + +QUEUE +* Fixed the issue that `responseObject` may not have response body in `createMessage` function. + +BROWSER +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.1.0 + +2017.01 Version 2.0.0 + +ALL +* Updated storage service version to 2016-05-31. Fore more information, please see - https://msdn.microsoft.com/en-us/library/azure/dd894041.aspx +* Fixed the issue that `BatchOperation` doesn't support socket reuse for some node versions. +* Fixed the issue that `BatchOperation` request pool size is too big when the socket reuse is supported. +* Added empty headers to string-to-sign. +* For response has body and no `content-type` header, try to parse the body using xml format. +* Fixed the issue that retry filter will continuously retry for client error like `ETIMEDOUT`. +* Added support for client side timeout. In order to set the timeout per API, please use `options.clientRequestTimeoutInMs`. To set the default value for all requests made via a particular service, please use `{blob|queue|table|file}Service.defaultClientRequestTimeoutInMs`. + +BLOB +* Added support for large block blob. +* Added `publicAccessLevel` to `ContainerResult` for the APIs `listContainersSegmented` and `listContainersSegmentedWithPrefix`. 
+* When specifying access condition `If-None-Match: *` for reading, it will always fail. +* Returned content MD5 for range gets Blobs. +* Fixed the issue that `useTransactionalMD5` didn't take effect for downloading a big blob. + +QUEUE +* `createMessage` callback has been changed from `errorOrResponse` to `errorOrResult` which contains `messageId`, `popReceipt`, `timeNextVisible`, `insertionTime` and `expirationTime`. It can be passed to updateMessage and deleteMessage APIs. + +FILE +* Returned content MD5 for range gets Files. +* Fixed the issue that `useTransactionalMD5` didn't take effect for downloading a big file. +* Added support for listing files and directories with prefix, refer to `FileService.listFilesAndDirectoriesSegmentedWithPrefix`. + +TABLE +* Fixed the issue that response in incorrect for table batch operation when the error response item is not the first item in the responses. + +2016.11 Version 1.4.0 + +ALL +* Added `ENOTFOUND` for secondary endpoint and `ECONNREFUSED` to `RetryPolicyFilter`. +* Added support for `text/html` error response body. + +BLOB +* Fixed the issue that the ChunkAllocator maxCount is aligned with parallelOperationThreadCount. +* Changed `/S` of SpeedSummary to `/s`. +* Fixed the issue that `BlobService.createBlockBlobFromText` will hang when passed `null` or `undefined` `text` argument. +* Fixed the issue that `BlobService.createBlockBlobFromText` will always set `content-type` to `text/plain`. + +QUEUE +* Allowed `QueueService.peekMessages` against secondary endpoint. + +FILE +* Fixed the issue that the ChunkAllocator maxCount is aligned with parallelOperationThreadCount. +* Changed `/S` of SpeedSummary to `/s`. + +2016.10 Version 1.3.2 + +BLOB +* Prevent a blockId from being generated with a decimal point. + +2016.09 Version 1.3.1 + +ALL +* Improved the type script support. + +2016.08 Version 1.3.0 + +ALL + +* Fixed the issue that retry filter will fail against storage emulator. 
+* Fixed a hang issue of `StorageServiceClient` with retry policy filter set when retrying sending the request, the stream is not readable anymore. +* Updated the default value of `CorsRule.ExposedHeaders`, `CorsRule.AllowedHeaders` to empty and `CorsRule.MaxAgeInSeconds` to `0` for `setServiceProperties` APIs of all services. +* Fixed the issue that service SAS doesn't work if specifying the `AccessPolicy.Protocols`. + +BLOB +* Added the API `BlobService.getPageRangesDiff` for getting the page ranges difference. Refer to https://msdn.microsoft.com/en-us/library/azure/mt736912.aspx for more detailed information. + +QUEUE +* Updated the `QueueMessageResult.dequeueCount` from `string` to `number`. +* Added the API `QueueService.getUrl` for getting the queue url. + +TABLE +* Added the API `TableService.getUrl` for getting the table url. + +2016.07 Version 1.2.0 + +ALL +* Fixed the issue that metadata name will be converted to lower-case after retrieving back from the server. **Note** that this fix is only applicable for Node 0.12 or higher version. +* Added support for EndpointSuffix for all service constructors. +* Updated storage service version to 2015-12-11. Fore more information, please see - https://msdn.microsoft.com/en-us/library/azure/dd894041.aspx +* Updated the `request` package to version 2.74.0 to address the security vulnerability - https://nodesecurity.io/advisories/130 + +BLOB +* Fixed the issue that the service error message will be written to the destination stream if getting error when downloading the blob to a stream/file. +* Added `serverEncryped` property to `BlobResult` class which indicates if the blob data and application metadata are completely encrypted using the specified algorithm on the server. + +FILE +* Fixed the issue that the service error message will be written to the destination stream if getting error when downloading the file to a stream/file. 
+ +TABLE +* The atom feed payload format is not supported anymore for table service APIs. + +2016.06 Version 1.1.0 + +ALL +* Fixed the issue that using SAS doesn't work against storage emulator. +* Fixed the issue that the service SAS signature is incorrect when protocol parameter is specified. +* Fixed the issue that the timeout query string should be in seconds instead of milliseconds. + +BLOB +* Added parameter snapshotId to BlobService.getUrl function to support getting url of a specified snapshot. +* Fixed the issue that the getUrl doesn't work against storage emulator. +* Fixed the race issue that the _rangeList may be deleted before using it in the BlockRangeStream._getTypeList function. +* Fixed the issue that downloading block blob with size bigger than 32MB will fail when using anonymous credential. +* Added `CREATE` to `BlobUtilities.SharedAccessPermissions`. + +TABLE +* Supported string type value for entity PartionKey and RowKey. +* Supported implicit Edm type value for entity properties. The supported implicit Edm types including Int32, Double, Bool, DateTime and String. + +FILE +* Fixed the issue that the getUrl doesn't work against storage emulator. +* Added `CREATE` to `FileUtilities.SharedAccessPermissions`. + +2016.05 Version 1.0.1 + +ALL +* Fixed the issue that StorageServiceClient._normalizeError will throw exception on Node below v4 because string.startsWith is not available on Node below v4. + +2016.05 Version 1.0.0 + +BLOB +* The `blob` property of BlobResult has been renamed to `name` to keep consistent with other services API and the `listBlob` API. +* Decoded the block name of LockListResult from base64 string to utf-8 string. + +QUEUE +* The `encodeMessage` flag of QueueService has been replaced by `messageEncoder` which support `TextBase64QueueMessageEncoder`, `BinaryBase64QueueMessageEncoder`, `TextXmlQueueMessageEncoder` and custom implementation of QueueMessageEncoder. 
+
+TABLE
+* Fixed the issue that loses the data type for Edm.Double value like: 1.0.
+* Fixed the issue that loses the data precision for Edm.Int64 value when it is outside of the range -(2^53 - 1) to (2^53 - 1).
+
+2016.03 Version 0.10.0
+
+ALL
+* The `signedIdentifiers` parameter and result properties have been changed from array to hash map to avoid non unique signed identifier id.
+
+BLOB
+* Added 'COPY' to the BlobUtilities.BlobListingDetails to include copy information in the results.
+* Added 'bytesCopied' and 'totalBytes' to the blob result.
+* Added the blob result to the callback of BlobService.commitBlocks.
+* Moved the properties in the properties object of BlobResult and ContainerResult when listing blobs or containers to the result object.
+* Renamed the property names returned from listing blobs or containers to camelCase.
+* The `contentType`, `contentEncoding`, `contentLanguage`, `contentDisposition`, 'contentMD5' and `cacheControl` parameters and return values about the blob's content settings are grouped into a `contentSettings` sub property.
+* The `contentMD5` parameter to verify the integrity of the data during the transport is changed to `transactionalContentMD5`
+* The `copy*` return values are grouped into a `copy` sub property.
+* The `lease*` return values are grouped into a `lease` sub property.
+* The options.accessConditions parameter is changed to AccessConditions type.
+
+QUEUE
+* Renamed QueueResult.approximatemessagecount to camelCase and changed its type to 'number'.
+* Renamed the property names of QueueMessageResult to camelCase.
+* Renamed the parameter options.messagettl of the createMessage API to options.messageTimeToLive.
+* Updated the callback of the createMessage API from errorOrResult to errorOrResponse.
+* Removed peekOnly option from the options parameter of the getMessages API. To peek messages, use peekMessages instead.
+* Added getMessage and peekMessage API. 
+ +FILE +* Moved the properties in the properties object of FileResult and ShareResult when listing files or shares to the result object. +* Renamed the property names returned from listing files or shares to camelCase. +* Renamed the property names returned from getting share stats to camelCase. +* The `contentType`, `contentEncoding`, `contentLanguage`, `contentDisposition`, 'contentMD5' and `cacheControl` parameters and return values about the blob's content settings are grouped into a `contentSettings` sub property. +* The `contentMD5` parameter to verify the integrity of the data during the transport is changed to `transactionalContentMD5` +* The `copy*` return values are grouped into a `copy` sub property. +* Fixed the issue that SAS tokens created from table names with upper-case letters do not work. +* The options.accessConditions parameter is changed to AccessConditions type. + +TABLE +* Fixed the issue that getTableAcl returns empty array with signedIdentifier property. +* Renamed the function updateEntity to replaceEntity. +* Renamed TableUtilities.entityGenerator.Entity to EntityProperty. + +2016.03 Version 0.9.0 + +ALL +* Updated storage service version to 2015-04-05. Fore more information, please see - https://msdn.microsoft.com/en-us/library/azure/dd894041.aspx +* Added support for Account SAS. +* Added support for IPACL and Protocols options for service SAS. +* Fixed the issue where the authentication may fail when a metadata key is in upper case. +* Added 'nsp check' task for security vulnerability check. +* Updated the dependency of the 'request' module to avoid the security vulnerability reported by the 'nsp' tool. + +BLOB +* Added new permission 'ADD' for service SAS. + +FILE +* Added support for metrics setting for service properties. + +2016.01 Version 0.8.0 + +ALL +* Preview release of the TypeScript definition file at "lib/azure-storage.d.ts". + +BLOB +* Added the blob result to the callback of BlobService.commitBlocks. 
+* Added the speed summary to the downloading APIs.
+
+2015.12 Version 0.7.0
+
+ALL
+* Fixed the typo in the function generateDevelopmentStorageCredentials.
+* Fixed the issue that the HTTP global agent setting is changed during parallel uploading and downloading and impacts on other Node.js applications.
+* Fixed the issue that the chunked stream writing methods do not accept string.
+* Fixed the issue that the request fails when the content-length is set to string '0' in the 'sendingRequestEvent' event handler.
+* Supported retry on XML parsing errors when the XML in the response body is corrupted.
+* Replaced the dependency "mime" with "browserify-mime" to work with Browserify.
+
+BLOB
+* Added an option to skip the blob or file size checking prior to the actual downloading.
+* Fixed the issue that it doesn't callback when it loses the internet connection during uploading/downloading.
+* Fixed the issue that the local file cannot be removed in the callback when uploading a blob from a local file.
+* Fixed the issue that the stream length doesn't work when it is larger than 32MB in the createBlockBlobFromStream, createPageBlobFromStream, createAppendBlobFromStream and appendFromStream functions.
+* Fixed the issue that it doesn't return error in the page range validation when the size exceeds the limit.
+* Renamed the function AppendFromLocalFile to appendFromLocalFile.
+* Renamed the function AppendFromStream to appendFromStream.
+* Renamed the function AppendFromText to appendFromText.
+
+TABLE
+* Fixed the issue that listTablesSegmentedWithPrefix with maxResult option throws exception.
+
+2015.09 Version 0.6.0
+
+ALL
+* Fixed the MD5 mismatch issue in uploading a blob when running with Node.js v4.0 or io.js.
+
+BLOB
+* Fixed the issue that it doesn't prompt appropriate error message when the source URI is missing in BlobService.startCopyBlob().
+
+2015.08 Version 0.5.0
+
+ALL
+* Updated storage service version to 2015-02-21. 
For more information, please see - https://msdn.microsoft.com/en-us/library/azure/dd894041.aspx +* Unified the function parameters for setting ACL of container, table, queue and share. +* Renamed 'AccessPolicy.Permission' to 'AccessPolicy.Permissions' on the result object from get*Acl to match the property name on the signedIdentifier property passed to set*Acl calls. +* Unified the name pattern of the http header constants. +* Split the property parameters from the options parameter for setting properties of blob, share and file. +* Updated the error message when an argument is in a wrong type while it requires a string. +* Exported AccessCondition to generate an object that represents a condition. +* Fixed an issue that the SAS in the SharedAccessSignature part of the connection string cannot start with '?'. +* Deprecated the generateSharedAccessSignatureWithVersion() method in each service. + +BLOB +* Supported operating against append blobs. +* Fixed an issue that the file descriptor in the FileReadStream is not closed. + +QUEUE +* Fixed an issue that options.messageText doesn't work in QueueService.updateMessage. + +2015.06 Version 0.4.5 + +* Updated the dependency of the 'request' module to avoid the security vulnerability reported by the 'nsp' tool: (https://nodesecurity.io/advisories/qs_dos_extended_event_loop_blocking) and (https://nodesecurity.io/advisories/qs_dos_memory_exhaustion). +* Included package validation in grunt tasks. + +2015.05 Version 0.4.4 + +ALL +* Updated the dependency of the 'validator' module to avoid the security vulnerability reported by the 'nsp' tool. (https://nodesecurity.io/advisories/validator-isurl-denial-of-service) +* Updated the error message when an argument is in a wrong type while it requires a string. +* Updated the grunt file to run test with mocha and generate jsDoc. + +BLOB +* Fixed an issue that the metadata is duplicated when creating a page blob. 
+* Fixed an issue that the metadata is duplicated when setting blob's metadata with metadata in the options. +* Fixed an issue that cannot create an empty block blob with useTransactionalMD5 option. + +FILE +* Fixed an issue that the result of file downloading contains wrong values for share, directory or file names. + +TABLE +* Fixed an issue that it prompts "Cannot set property 'isSuccessful' of null" when TableService.createTableIfNotExists is called. + +2015.03 Version 0.4.3 + +ALL +* Fixed an issue that setting metadata keys are converted into lowercase. The metadata keys retrieved from the service will however still be converted into lowercase by the http component of Node.js.(https://github.com/joyent/node/issues/1954) +* Included all storage error code strings in the error constants definition. +* Documented the client request ID option in all APIs. + +BLOB +* Supported listing blob virtual directories. +* Fixed an issue that exception is thrown when downloading a blob larger than 32MB to a stream. +* Fixed an issue that the process exits when the free memory is low. + +2014.12 Version 0.4.2 + +ALL +* Fixed an issue that batch operation could probably wait without callback. +* Added the readable-stream module to adapt stream operations in both node 0.8 and node 0.10. +* Supported nock in tests. + +BLOB +* Supported trimming the default port of http or https when getting URL for the blob service. +* Fixed an issue that the metadata is not populated when getting the blob to text. + +FILE +* Supported trimming the default port of http or https when getting URL for the file service. + +2014.11.28 Version 0.4.1 + +ALL +* Fixed an issue where the request does not invoke the callback when the input stream ends with an 'end' event instead of a 'finish' event. +* Fixed an issue where the request does not invoke the callback when the input stream ends with a 'close' event on Node 0.8.X. 
+* Fixed an issue that the temporary files generated by unit tests are not cleaned up. +* Fixed an issue that the unit tests may fail when the file generated by previous tests is not accessible temporarily. + +FILE +* Added support to download a single file in parallel similar to upload. You can set ‘parallelOperationThreadCount’ option for api’s that download a file to indicate number of parallel operations to use for download. + +TABLE +* Fixed an issue which caused invalid input errors when the partition key or the row key contains an apostrophe. + +2014.10.28 Version 0.4.0 + +ALL +* Provide an option to enable/disable nagling. Nagling is disabled by default. It can be enabled by setting options.useNagleAlgorithm to true. +* Added batch operation callback in sequence mode. + +BLOB +* Added support to download a single blob in parallel similar to upload. You can set ‘parallelOperationThreadCount’ option for api’s that download a blob to indicate number of parallel operations to use for download. +* Added speed summary in blob downloading. + +FILE +* Fixed an issue which caused an invalid resource name error when the directory name starts or ends with a '/' + +2014.08.20 Version 0.3.3 + +BLOB +* Fixed an issue where SAS tokens were being incorrectly generated for the root container and when the blob name required encoding. +* Documented the 'parallelOperationThreadCount' option as input to various uploadBlob APIs. + +FILE +* Fixed an issue where signing was incorrect when the URI contained '.' or '..'. +* Fixed an issue where "getURI" was requiring a file parameter, although the parameter should be optional. + +2014.07.25 Version 0.3.2 + +ALL +* Fixed an issue which prevented transient server errors like ECONNRESET, ESOCKETTIMEDOUT and ETIMEDOUT from being retried. + +BLOB +* Fixed an issue which caused a reference error in blobs due to 'err' not being defined. 
+ +2014.07.22 Version 0.3.1 + +ALL +* Fixed an issue which failed to validate special names for containers and tables. +* Exposed the Validation utility methods so users can use it to validate resource names. + +BLOB +* Fixed an issue which caused failures when an error was encountered while uploading big blobs. + +2014.07.07 Version 0.3.0 + +BLOB +* Fixed an issue which failed to return single item blocklists while doing listBlocks. + +FILE +* Added File Service support. The File Service and the associated APIs are in preview. + +2014.07.01 Version 0.2.1 + +ALL +* Fixed an issue with casing which caused the module to fail on linux machines. + +BLOB +* Fixed an issue which failed to upload an empty blob for empty block blob files uploaded using createBlockBlobFromFile when retry policies were used. + +2014.06.16 Version 0.2.0 + +ALL +* Updated storage service version to 2014-02-14. The SAS tokens generated will therefore contain a signed version of 2014-02-14 and all the requests using SAS credentials have the api-version query parameter appended to the URI. + +2014.06.12 Version 0.1.0 + +ALL +* The package has been renamed to azure-storage. +* There is no separate azure-common sub-package. Everything is condensed into a single package called azure-storage. +* azure.create*Service supports the following configurations: + * create*Service(connection string) + * create*Service(account name, account key) + * create*Service(storage host, sasToken) +* Added the ability to choose which SAS Version to use when generating Shared Access Signatures (either the 2012-02-12 or 2013-08-15 versions). +* Host can be given as either an object with primaryHost and secondaryHost or as a string representing the primary. +* The order credentials are assessed is consistent: If parameters are passed in, they and they alone are honored. There is no mixing (ex, if users pass in host, they won't get the access key from your env var). If no parameters are passed in, the env vars are used. 
Within both those strategies, the order of evaluation is emulator (in the case of EMULATED env var only), connection string, account/key or SAS, anonymous. If account/key and SAS are passed in, an error is thrown indicating the credentials are invalid. +* Removed the ability to create a storage service from a config file. +* azure.create*Service supports secondaryHost only, but either primaryHost or secondaryHost must be provided. +* Storage service client constructors take the sasToken rather than a credentials object, meaning that the SharedAccessSignature class no longer needs to be used by consumers. +* Ensure request is always built/signed for every retry attempt. +* Custom retry policies should implement the shouldRetry method and return an object with the retryInterval and retryable information in it. Optionally, they can also set the locationMode and targetLocation to which the request should be sent. +* Implemented RA-GRS support. For more information on this, please see - http://blogs.msdn.com/b/windowsazurestorage/archive/2013/12/04/introducing-read-access-geo-replicated-storage-ra-grs-for-windows-azure-storage.aspx +* Service properties updated: replaced metrics with hour metrics and minute metrics, added cors support, added defaults if properties are unspecified to reduce invalid xml exceptions. +* Added maximum execution time, settable for all requests via {blob|queue|table}service.defaultMaximumExecutionTime or for an individual request via options.maximumExecutionTime. This applies to all requests except blob downloads. +* The blob, queue and table services no longer set/modify any values within the options object optionally passed in by the user for every API. +* Fixed an issue where null, empty and white-space only metadata header values were allowed. +* "timeout" query parameter will not be sent to the server if not set by the user. In order to set the timeout per API, please use options.timeoutIntervalInMs. 
To set it for all requests made via a particular service, please use {blob|queue|table}Service.defaultTimeoutIntervalInMs. +* ECONNRESET is handled in the retry policy filter. +* DevStore with secondary access is supported. +* Set*Acl takes signed identifiers as a parameter - before it was in options. +* GenerateSharedAccessSignature() produces a query string rather than an object. +* The SharedKey class has the methods to create shared access signatures. These should not be in the SharedAccessSignature class because one can only create a SAS if they have the account/key. +* The service client no longer evaluates the port and protocol set on it. Before, if storageserviceclient.host was called and port and protocol had been changed after host was set, host would be incorrect. +* Removed SharedKeyLite and SharedKeyLiteTable in favor of SharedKey and SharedKeyTable which were already the defaults. +* Added separate utility files for Blob, Queue, Table, and Storage. These contain useful enums that were previously found in a single Constants file as well as new utility functions. The old Constants file will mostly include internal-use constants. + +BLOB +* createBlobServiceWithSas has been provided to create a blobService to use with a Shared Access Signature. Users can create a blobService using azure.createBlobServiceWithSas(host, sasToken). +* Anonymous access for the blob service is supported. +* Added support for 2013-08-15 Blob SAS changes. +* Renamed listBlobs to listBlobsSegmented and added listBlobsSegmentedWithPrefix. listBlobsSegmented takes in currentToken and listBlobsSegmentedWithPrefix takes in prefix and currentToken along with other parameters. Please look at the documentation of these APIs for a list of parameters that users can set on the options object. These APIs return only error, result and response. The result contains entries which is a list of blobs and a continuation token for successive listing operations. 
+* Renamed copyBlob to startCopyBlob. +* Renamed putBlockBlobFromStream to _putBlockBlobFromStream and putBlockBlobFromFile to _putBlockBlobFromFile. These are internal methods and should not be called by users directly. Please use createBlockBlobFromStream and createBlockBlobFromFile. +* Renamed page methods: listBlobRegions to listPageRanges, createBlobPagesFromText to createPagesFromText, clearBlobPages to clearPageRanges. +* Renamed block methods: createBlobBlockFromStream to createBlockFromStream, createBlobBlockFromText to createBlockFromText, commitBlobBlocks to commitBlocks, listBlobBlocks to listBlocks. +* BlobService lease methods also work with container leases, simply specify null for the blob parameter. +* Added changeLease method to modify the lease ID of an active lease. +* Fixed lease issues, break lease allows a leaseBreakPeriod of 0, lease time header is returned as an int rather than a string. +* Added snapshot delete options: when deleting a blob with snapshots, users can decide to delete blob and snapshots or just snapshots. The options are provided in BlobUtilities.SnapshotDeleteOptions.*. +* deleteBlob callback does not return isSuccessful. +* Added does{Container|Blob}Exist methods in blobservice that can be used to determine if a particular container or blob exists. +* Added delete{Container|Blob}IfExists methods in blobservice that can be used to delete a container or a blob only when they exist. +* Added ContentMD5 validation for blob downloads. This is turned on by default for all block blob downloads and all page blob range downloads as long as the range is less than 4MB. In order to turn this off, set options.disableContentMD5Validation to true explicitly. +* x-ms-range-get-content-md5 is set internally by the library and users do not have to specify it in the options. +* Removed the option of doing range downloads while downloading a blob to text. +* Added sequence number support for page blobs. 
Users can set the sequence number of a page blob using blobService.setPageBlobSequenceNumber and manage concurrency issues using x-ms-if-sequence-number-le, x-ms-if-sequence-number-lt and x-ms-if-sequence-number-eq. Please see http://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more details about using sequence numbers. +* Removed createBlob API. Please use createPageBlob and createBlockBlob* APIs to create the respective blobs. +* Renamed createPagesFromText to _createPagesFromText. This is an internal method and should not be called by users directly. +* Added resizePageBlob that can be used to resize a page blob. +* createPageBlob takes in the blob's sequence number using options.sequenceNumber. +* All the BlobService APIs that took cacheControl/cacheControlHeader, contentType/contentTypeHeader, contentEncoding/contentEncodingHeader, contentLanguage/contentLanguageHeader, contentMD5/contentMD5Header in options only take cacheControl, contentType, contentEncoding, contentLanguage and contentMD5. These are the values that will be set on the blob at the server. +* createPagesFromStream takes in the following 2 options - useTransactionalMD5 to calculate and send/validate content MD5 for each transaction and contentMD5 which is an optional hash value. When contentMD5 is provided, the client library uses that instead of trying to calculate it based on the data being uploaded. +* createBlockBlobFromText, createBlockFromStream and createBlockFromText allows users to set useTransactionalMD5 on options to calculate and send/validate content MD5 for each transaction. +* Added createWriteStreamToBlockBlob, createWriteStreamToNewPageBlob and createWriteStreamToExistingPageBlob that provide a stream for writing to the blob. +* Added createReadStream that provides a stream for reading from the blob. 
+* blobService can have a default parallelOperationThreadCount which specifies the number of parallel upload operations that may be performed when uploading a blob that is greater than the value specified by singleBlobPutUploadThresholdInBytes. This value can be set using blobService.parallelOperationThreadCount. The default value set on blobService is 1. +* createBlockBlobFromText supports text Buffers in addition to strings. +* createBlockBlobFromText will throw an error if the uploaded content exceeds 64MB. +* BlobService getBlobUrl has been renamed to getUrl as it can also produce container urls. It can no longer create a SAS token (it used to take sharedAccessPolicy) but can take a sasToken produced by the generateSharedAccessSignature method and produce a url with that. + +QUEUE +* Added Shared Access Signatures for queues. createQueueServiceWithSas has been provided to create a queueService to use with a Shared Access Signature. Users can create a queueService using azure.createQueueServiceWithSas(host, sasToken). +* Renamed listQueues to listQueuesSegmented and added listQueuesSegmentedWithPrefix. listQueuesSegmented takes in currentToken and listQueuesSegmentedWithPrefix takes in prefix and currentToken along with other parameters. Please look at the documentation of these APIs for a list of parameters that users can set on the options object. These APIs return only error, result and response. The result contains entries which is a list of queues and a continuation token for successive listing operations. +* deleteQueue and deleteMessage callbacks do not return isSuccessful. +* Added an option in the queue service to turn base 64 encoding off. + +TABLE +* Added Shared Access Signatures for tables. createTableServiceWithSas has been provided to create a tableService to use with a Shared Access Signature. Users can create a tableService using azure.createTableServiceWithSas(host, sasToken). +* Added JSON support for tables and removed AtomPub. 
For the different flavours of JSON supported, please see - http://blogs.msdn.com/b/windowsazurestorage/archive/2013/12/05/windows-azure-tables-introducing-json.aspx. +* Renamed queryTables to listTablesSegmented and added listTablesSegmentedWithPrefix. listTablesSegmented takes in currentToken and listTablesSegmentedWithPrefix takes in prefix and currentToken along with options and callback. Please look at the documentation of these APIs for a list of parameters that users can set on the options object. These APIs return only error, result and response. The result contains entries which is a list of tables and a continuation token for successive listing operations. +* deleteTable and deleteEntity callbacks do not return isSuccessful. +* queryEntities in TableService returns 3 parameters instead of 4: the entities and queryResultContinuation parameters are returned within a queryResult object. +* Added support for disabling echo content in inserts. By default, inserts do not echo content. +* Changed table error parsing so that the code string and message string are directly accessible from the error object. +* Removed the getTable method; doesTableExist works similarly. +* createTable does not request content back from the service, but still returns an object containing the TableName. +* Renamed queryEntity in TableService to retrieveEntity. +* Modified how batches work. Instead of turning the service batch mode on/off, a batch is a separate entity like query. Batches are constructed and then executed. +* queryEntities in TableService requires a continuation token. +* TableService updateEntity, mergeEntity and deleteEntity do not take a checkEtag option. The entity's etag is sent if it exists, otherwise * is sent. +* retrieveEntity and queryEntities accept empty partition and row keys. +* Added entity-property-creation helper methods. 
Users can use the entityGenerator provided in TableUtilities and create entity properties as follows: + var entGen = TableUtilities.entityGenerator; + var entity = { PartitionKey: entGen.String('part2'), + RowKey: entGen.String('row1'), + boolValueTrue: entGen.Boolean(true), + boolValueFalse: entGen.Boolean(false), + intValue: entGen.Int32(42), + dateValue: entGen.DateTime(new Date(Date.UTC(2011, 10, 25))), + complexDateValue: entGen.DateTime(new Date(Date.UTC(2013, 02, 16, 01, 46, 20))) + }; +* Removed TableQuery whereKeys method. Instead of tableQuery.whereKeys, tableService.retrieveEntity should be used. +* Removed TableQuery whereNextKeys method. Instead of tableQuery.whereNextKeys, tableService.queryEntities takes currentToken which can be retrieved from a previous queryEntities call in results. +* Removed TableQuery from method. Instead of tableQuery.from, tableService.queryEntities takes table and tableQuery may be null to represent retrieving all entities in the table. +* TableQuery select is an instance method for consistency with top and where. Instead of TableQuery.select('foo'), use new TableQuery().select('foo'). +* TableQuery provides helper methods to create filter strings to use with the where clause for a query. Query strings may also include type specifiers where necessary. 
+ For example, to query on a long value, users could do the following: + var tableQuery = new TableQuery().where(TableQuery.int64Filter('Int64Field', TableUtilities.QueryComparisons.EQUAL, '4294967296')); + OR + var tableQuery = new TableQuery().where('Int64Field == ?int64?', '4294967296'); + +2013.01.15 Version 0.8.0 +* Added the Preview Service Management libraries as separate modules +* Added ability to consume PEM files directly from the Service Management libraries +* Added support for createOrUpdate and createRegistrationId in the Notification Hubs libraries + +2013.01.10 Version 0.7.19 +* Lock validator version + +2013.11.27 Version 0.7.18 +* Lock xmlbuilder version + +2013.11.5 Version 0.7.17 +* Added getBlob and createBlob operations that support stream piping +* Added compute, management, network, serviceBus, sql, storage management, store and subscription preview wrappers +* Multiple bugfixes + +2013.10.16 Version 0.7.16 +* Improved API documentation +* Updated Virtual Machines API to 2013-06-01 +* Added website management preview wrappers +* Multiple bugfixes + +2013.08.19 Version 0.7.15 +* Multiple storage fixes +* Fix issue with Notification Hubs template message sending + +2013.08.12 Version 0.7.14 +* Multiple storage fixes +* Documentation improvements +* Added support for large blobs upload / download + +2013.08.08 Version 0.7.13 +* Lock request version + +2013.07.29 Version 0.7.12 +* Added MPNS support +* Added Service management vnet operations support + +2013.07.10 Version 0.7.11 +* Hooked up new configuration system to storage APIs +* Support for AZURE_STORAGE_CONNECTION_STRING environment variable +* Included API for websites management +* Fixed UTF-8 support in table batch submit + +2013.06.26 Version 0.7.10 +* Various fixes in storage APIs + +2013.06.19 Version 0.7.9 +* First part of new SDK configuration system +* Support for AZURE_SERVICEBUS_CONNECTION_STRING environment variable +* Updated SAS generation logic to include version number +* 
Infrastructure support for creating passwordless VMs + +2013.06.13 Version 0.7.8 +* Updates to HDInsight operations + +2013.06.06 Version 0.7.7 +* Added support for Android notification through Service Bus Notification Hubs +* Support prefixes when listing tables +* Support '$logs' as a valid blob container name to support reading diagnostic information +* Fix for network configuration serialization for subnets + +2013.05.30 Version 0.7.6 +* Added list, delete and create cluster operations for HD Insight. + +2013.05.15 Version 0.7.5 +* Fixed registration hubs issue with requiring access key when shared key was provided. +* Fixed registration hubs issue with listByTag, Channel and device token + +2013.05.09 Version 0.7.4 +* Fixed encoding issue with partition and row keys in table storage query + +2013.05.01 Version 0.7.3 +* Fixed issue #680: BlobService.getBlobUrl puts permissions in SAS url even if not given +* Changes to test suite & sdk to run in other environments +* Notification hubs registrations +* Support in ServiceManagementClient for role reboot and reimage + +2013.04.05 Version 0.7.2 +* Removing workaround for SSL issue and forcing node version to be outside the > 0.8 && < 0.10.3 range where the issue occurs + +2013.04.03 Version 0.7.1 +* Adding (limited) support for node 0.10 +* Fixing issue regarding registering providers when using websites or mobiles services + +2013.03.25 Version 0.7.0 +* Breaking change: Primitive types will be stored for table storage. +* Adding support for creating and deleting affinity groups +* Replacing http-mock by nock and making all tests use it by default +* Adding notification hubs messages for WNS and APNS +* Add Strict SSL validation for server certificates +* Add support for creating subscriptions that expire + +2013.03.12 Version 0.6.11 +* Added constraint to package.json to restrict to node versions < 0.9. 
+
+2013.02.11 Version 0.6.10
+* Added helper date.* functions for generating SAS expirations (secondsFromNow, minutesFromNow, hoursFromNow, daysFromNow)
+* Added SQL classes for managing SQL Servers, Databases and Firewall rules
+* Updating to use latest xml2js
+
+2012.12.12 Version 0.6.9
+* Exporting WebResource, Client classes from package to support CLI.
+* Install message updated to remind users the CLI is now a separate package.
+
+2012.11.20 Version 0.6.8
+ * CLI functionality has been pulled out into a new "azure-cli" module. See https://github.com/WindowsAzure/azure-sdk-tools-xplat for details.
+ * Add support for sb: in ServiceBus connection strings.
+ * Add functions to ServiceManagement for managing storage accounts.
+ * Merged #314 from @smarx for allowing empty partition keys on the client.
+ * Merged #447 from @anodejs for array enumeration and exception on batch response.
+ * Various other fixes
+
+2012.10.15 Version 0.6.7
+ * Adding connection strings support for storage and service bus
+ * Fixing issue with EMULATED and explicit variables making the latter more relevant
+ * Adding Github support
+ * Adding website application settings support
+
+2012.10.12 Version 0.6.6
+ * Using fixed version of commander.js to avoid bug in commander.js 1.0.5
+
+2012.10.01 Version 0.6.5
+ * Bugfixing
+
+2012.09.18 Version 0.6.4
+ * Multiple Bugfixes around blob streaming
+
+2012.09.09 Version 0.6.3
+ * Fixing issue with xml2js
+
+2012.08.15 Version 0.6.2
+ * Multiple Bugfixes
+
+2012.07.02 Version 0.6.1
+ * Multiple Bugfixes
+ * Adding subscription setting and listing functionality.
+
+2012.06.06 Version 0.6.0
+ * Adding CLI tool
+ * Multiple Bugfixes
+
+2012.04.19 Version 0.5.3
+ * Service Runtime Wrappers
+ * Multiple Bugfixes
+ * Unit tests converted to mocha and code coverage made easy through JSCoverage
+
+2012.02.10 Version 0.5.2
+ * Service Bus Wrappers
+ * Storage Services UT run against a mock server. 
+ * Node.exe version requirement lowered to raise compatibility. + * Multiple Bugfixes + +2011.12.14 Version 0.5.1 + * Multiple bug fixes + +2011.12.09 Version 0.5.0 + * Initial Release diff --git a/src/node_modules/azure-storage/ISSUE_TEMPLATE.md b/src/node_modules/azure-storage/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..dba6d32 --- /dev/null +++ b/src/node_modules/azure-storage/ISSUE_TEMPLATE.md @@ -0,0 +1,16 @@ +### Which service(blob, file, queue, table) does this issue concern? + + +### Which version of the SDK was used? + + +### What's the Node.js/Browser version? + + +### What problem was encountered? + + +### Steps to reproduce the issue? + + +### Have you found a mitigation/solution? diff --git a/src/node_modules/azure-storage/LICENSE.txt b/src/node_modules/azure-storage/LICENSE.txt new file mode 100644 index 0000000..21254fc --- /dev/null +++ b/src/node_modules/azure-storage/LICENSE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/src/node_modules/azure-storage/README.md b/src/node_modules/azure-storage/README.md new file mode 100644 index 0000000..3cd96db --- /dev/null +++ b/src/node_modules/azure-storage/README.md @@ -0,0 +1,560 @@ +# Microsoft Azure Storage SDK for Node.js and JavaScript for Browsers + +[![NPM version](https://badge.fury.io/js/azure-storage.svg)](http://badge.fury.io/js/azure-storage) + +* Master [![Build Status](https://travis-ci.org/Azure/azure-storage-node.svg?branch=master)](https://travis-ci.org/Azure/azure-storage-node/branches) [![Coverage Status](https://coveralls.io/repos/Azure/azure-storage-node/badge.svg?branch=master&service=github)](https://coveralls.io/github/Azure/azure-storage-node?branch=master) +* Dev [![Build Status](https://travis-ci.org/Azure/azure-storage-node.svg?branch=dev)](https://travis-ci.org/Azure/azure-storage-node/branches) [![Coverage Status](https://coveralls.io/repos/Azure/azure-storage-node/badge.svg?branch=dev&service=github)](https://coveralls.io/github/Azure/azure-storage-node?branch=dev) + +This project provides a Node.js package and a browser compatible [JavaScript Client Library](https://github.com/Azure/azure-storage-node#azure-storage-javascript-client-library-for-browsers) that makes it easy to consume and manage Microsoft Azure Storage Services. + +> This README page is a reference to the SDK v2. For the new SDK v10, go to [Storage SDK v10 for JavaScript](https://github.com/Azure/azure-storage-js). 
+ +| SDK Name | Version | Description | NPM/API Reference Links | +|------------------------------------------------------------------------------------------|-------------|------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Storage SDK v10 for JavaScript](https://github.com/Azure/azure-storage-js) | v10 | The next generation Storage SDK (Blob/Queue/File, async and promise support) | [NPM](https://www.npmjs.com/package/@azure/storage-blob) - [Reference](https://docs.microsoft.com/en-us/javascript/api/overview/azure/storage/client?view=azure-node-preview) | +| [Storage SDK v2 for JavaScript](https://github.com/Azure/azure-storage-node) | v2 | Legacy Storage SDK in this repository (Blob/Queue/File/Table, callback style) | [NPM](https://www.npmjs.com/package/azure-storage) - [Reference](https://docs.microsoft.com/en-us/javascript/api/azure-storage/?view=azure-node-latest) | +| [Azure Management SDKs for JavaScript](https://github.com/Azure/azure-sdk-for-node) | v2 | Management SDKs including Storage Resource Provider APIs | [NPM](https://www.npmjs.com/package/azure) - [Reference](https://github.com/Azure/azure-sdk-for-node#documentation) | + +# Features + +- Blobs + - Create/Delete Containers + - Create/Read/Update/Delete Blobs +- Tables + - Create/Delete Tables + - Query/Create/Read/Update/Delete Entities +- Files + - Create/Delete Shares + - Create/Delete Directories + - Create/Read/Update/Delete Files +- Queues + - Create/Delete Queues + - Insert/Peek Queue Messages + - Advanced Queue Operations +- Service Properties + - Get Service Properties + - Set Service Properties + +Please check details on API reference documents: + +* [Microsoft official API document on docs.microsoft.com](https://docs.microsoft.com/en-us/javascript/api/azure-storage/?view=azure-node-latest) +* 
[Generated API references on GitHub pages](http://azure.github.io/azure-storage-node) + +# Getting Started + +## Install + +```shell +npm install azure-storage +``` + +## Usage + +```Javascript +var azure = require('azure-storage'); +``` + +When using the Storage SDK, you must provide connection information for the storage account to use. This can be provided using: + +* Environment variables - **AZURE_STORAGE_ACCOUNT** and **AZURE_STORAGE_ACCESS_KEY**, or **AZURE_STORAGE_CONNECTION_STRING**. + +* Constructors - For example, `var tableSvc = azure.createTableService(accountName, accountKey);` + +### Blob Storage + +The **createContainerIfNotExists** method can be used to create a +container in which to store a blob: + +```Javascript +var azure = require('azure-storage'); +var blobService = azure.createBlobService(); +blobService.createContainerIfNotExists('taskcontainer', { + publicAccessLevel: 'blob' +}, function(error, result, response) { + if (!error) { + // if result = true, container was created. + // if result = false, container already existed. + } +}); +``` + +To upload a file (assuming it is called task1-upload.txt and it is placed in the same folder as the script below), the method **createBlockBlobFromLocalFile** can be used. + +```Javascript +var azure = require('azure-storage'); +var blobService = azure.createBlobService(); + +blobService.createBlockBlobFromLocalFile('mycontainer', 'taskblob', 'task1-upload.txt', function(error, result, response) { + if (!error) { + // file uploaded + } +}); +``` + + +For page blobs, use **createPageBlobFromLocalFile**. There are other methods for uploading blobs also, such as **createBlockBlobFromText** or **createPageBlobFromStream**. + +There are also several ways to download block and page blobs. 
For example, **getBlobToStream** downloads the blob to a stream: + +```Javascript +var blobService = azure.createBlobService(); +var fs = require('fs'); +blobService.getBlobToStream('mycontainer', 'taskblob', fs.createWriteStream('output.txt'), function(error, result, response) { + if (!error) { + // blob retrieved + } +}); +``` + +To create a Shared Access Signature (SAS), use the **generateSharedAccessSignature** method. Additionally you can use the **date** helper functions to easily create a SAS that expires at some point relative to the current time. + +```Javascript +var azure = require('azure-storage'); +var blobService = azure.createBlobService(); + +var startDate = new Date(); +var expiryDate = new Date(startDate); +expiryDate.setMinutes(startDate.getMinutes() + 100); +startDate.setMinutes(startDate.getMinutes() - 100); + +var sharedAccessPolicy = { + AccessPolicy: { + Permissions: azure.BlobUtilities.SharedAccessPermissions.READ, + Start: startDate, + Expiry: expiryDate + } +}; + +var token = blobService.generateSharedAccessSignature(containerName, blobName, sharedAccessPolicy); +var sasUrl = blobService.getUrl(containerName, blobName, token); +``` + +### Table Storage + +To ensure a table exists, call **createTableIfNotExists**: + +```Javascript +var azure = require('azure-storage'); +var tableService = azure.createTableService(); +tableService.createTableIfNotExists('mytable', function(error, result, response) { + if (!error) { + // result contains true if created; false if already exists + } +}); +``` +A new entity can be added by calling **insertEntity** or **insertOrReplaceEntity**: + +```Javascript +var azure = require('azure-storage'); +var tableService = azure.createTableService(); +var entGen = azure.TableUtilities.entityGenerator; +var entity = { + PartitionKey: entGen.String('part2'), + RowKey: entGen.String('row1'), + boolValueTrue: entGen.Boolean(true), + boolValueFalse: entGen.Boolean(false), + intValue: entGen.Int32(42), + dateValue: 
entGen.DateTime(new Date(Date.UTC(2011, 10, 25))), + complexDateValue: entGen.DateTime(new Date(Date.UTC(2013, 02, 16, 01, 46, 20))) +}; +tableService.insertEntity('mytable', entity, function(error, result, response) { + if (!error) { + // result contains the ETag for the new entity + } +}); +``` + + +Instead of creating entities manually, you can use **entityGenerator**: + +```Javascript +var azure = require('azure-storage'); +var entGen = azure.TableUtilities.entityGenerator; +var task = { + PartitionKey: entGen.String('hometasks'), + RowKey: entGen.String('1'), + description: entGen.String('take out the trash'), + dueDate: entGen.DateTime(new Date(Date.UTC(2015, 6, 20))) +}; +``` + +The method **retrieveEntity** can then be used to fetch the entity that was just inserted: + +```Javascript +var azure = require('azure-storage'); +var tableService = azure.createTableService(); +tableService.retrieveEntity('mytable', 'part2', 'row1', function(error, result, response) { + if (!error) { + // result contains the entity + } +}); +``` + +The method **replaceEntity** or **insertOrReplaceEntity** can be called to update/edit an existing entry. In the following example we assume that an entity `'part2', 'row1'` with a field `'taskDone'` set to `false` already exists. 
+ +```Javascript +var azure = require('azure-storage'); +var tableService = azure.createTableService(); +var entity = { + PartitionKey: entGen.String('part2'), + RowKey: entGen.String('row1'), + taskDone: entGen.Boolean(true), +}; + +tableService.insertOrReplaceEntity('mytable', entity, function(error, result, response) { + if (!error) { + // result contains the entity with field 'taskDone' set to `true` + } +}); +``` + +Use **TableQuery** to build complex queries: + +```Javascript +var azure = require('azure-storage'); +var tableService = azure.createTableService(); +var query = new azure.TableQuery() + .top(5) + .where('PartitionKey eq ?', 'part2'); + +tableService.queryEntities('mytable', query, null, function(error, result, response) { + if (!error) { + // result.entries contains entities matching the query + } +}); +``` + +### Queue Storage + +The **createQueueIfNotExists** method can be used to ensure a queue exists: + +```Javascript +var azure = require('azure-storage'); +var queueService = azure.createQueueService(); +queueService.createQueueIfNotExists('taskqueue', function(error) { + if (!error) { + // Queue exists + } +}); +``` + +The **createMessage** method can then be called to insert the message into the queue: + +```Javascript +var queueService = azure.createQueueService(); +queueService.createMessage('taskqueue', 'Hello world!', function(error) { + if (!error) { + // Message inserted + } +}); +``` + +It is then possible to call the **getMessage** method, process the message and then call **deleteMessage** inside the callback. This two-step process ensures messages don't get lost when they are removed from the queue. 
+ +```Javascript +var queueService = azure.createQueueService(), + queueName = 'taskqueue'; +queueService.getMessages(queueName, function(error, serverMessages) { + if (!error) { + // Process the message in less than 30 seconds, the message + // text is available in serverMessages[0].messageText + + queueService.deleteMessage(queueName, serverMessages[0].messageId, serverMessages[0].popReceipt, function(error) { + if (!error) { + // Message deleted + } + }); + } +}); +``` + +### File Storage + +The **createShareIfNotExists** method can be used to create a +share in which to store a file or a directory of files: + +```Javascript +var azure = require('azure-storage'); +var fileService = azure.createFileService(); +fileService.createShareIfNotExists('taskshare', function(error, result, response) { + if (!error) { + // if result = true, share was created. + // if result = false, share already existed. + } +}); +``` + +To create a directory, the method **createDirectoryIfNotExists** can be used. + +```Javascript +var azure = require('azure-storage'); +var fileService = azure.createFileService(); + +fileService.createDirectoryIfNotExists('taskshare', 'taskdirectory', function(error, result, response) { + if (!error) { + // if result.created = true, share was created. + // if result.created = false, share already existed. + } +}); +``` + +To upload a file (assuming it is called task1-upload.txt and it is placed in the same folder as the script below), the method **createFileFromLocalFile** can be used. + +```Javascript +var azure = require('azure-storage'); +var fileService = azure.createFileService(); + +fileService.createFileFromLocalFile('taskshare', 'taskdirectory', 'taskfile', 'task1-upload.txt', function(error, result, response) { + if (!error) { + // file uploaded + } +}); +``` + +To upload a file from a stream, the method **createFileFromStream** can be used. 
The var `myFileBuffer` in the script below is a native Node Buffer, or ArrayBuffer object if within a browser environment. + +```Javascript + var stream = require('stream'); + var azure = require('azure-storage'); + var fileService = azure.createFileService(); + + var fileStream = new stream.Readable(); + fileStream.push(myFileBuffer); + fileStream.push(null); + + fileService.createFileFromStream('taskshare', 'taskdirectory', 'taskfile', fileStream, myFileBuffer.length, function(error, result, response) { + if (!error) { + // file uploaded + } + }); +``` + +To create a file from a text string, the method **createFileFromText** can be used. A Node Buffer or ArrayBuffer object containing the text can also be supplied. + +```Javascript + var azure = require('azure-storage'); + var fileService = azure.createFileService(); + + var text = 'Hello World!'; + + fileService.createFileFromText('taskshare', 'taskdirectory', 'taskfile', text, function(error, result, response) { + if (!error) { + // file created + } + }); +``` + +There are also several ways to download files. 
For example, **getFileToStream** downloads the file to a stream: + +```Javascript +var fileService = azure.createFileService(); +var fs = require('fs'); +fileService.getFileToStream('taskshare', 'taskdirectory', 'taskfile', fs.createWriteStream('output.txt'), function(error, result, response) { + if (!error) { + // file retrieved + } +}); +``` + +### Service Properties + +The **getServiceProperties** method can be used to fetch the logging, metrics and CORS settings on your storage account: + +```Javascript +var azure = require('azure-storage'); +var blobService = azure.createBlobService(); + +blobService.getServiceProperties(function(error, result, response) { + if (!error) { + var serviceProperties = result; + // properties are fetched + } +}); +``` + +The **setServiceProperties** method can be used to modify the logging, metrics and CORS settings on your storage account: + +```Javascript +var azure = require('azure-storage'); +var blobService = azure.createBlobService(); + +var serviceProperties = generateServiceProperties(); + +blobService.setServiceProperties(serviceProperties, function(error, result, response) { + if (!error) { + // properties are set + } +}); + +function generateServiceProperties() { + return serviceProperties = { + Logging: { + Version: '1.0', + Delete: true, + Read: true, + Write: true, + RetentionPolicy: { + Enabled: true, + Days: 10, + }, + }, + HourMetrics: { + Version: '1.0', + Enabled: true, + IncludeAPIs: true, + RetentionPolicy: { + Enabled: true, + Days: 10, + }, + }, + MinuteMetrics: { + Version: '1.0', + Enabled: true, + IncludeAPIs: true, + RetentionPolicy: { + Enabled: true, + Days: 10, + }, + }, + Cors: { + CorsRule: [ + { + AllowedOrigins: ['www.azure.com', 'www.microsoft.com'], + AllowedMethods: ['GET', 'PUT'], + AllowedHeaders: ['x-ms-meta-data*', 'x-ms-meta-target*', 'x-ms-meta-xyz', 'x-ms-meta-foo'], + ExposedHeaders: ['x-ms-meta-data*', 'x-ms-meta-source*', 'x-ms-meta-abc', 'x-ms-meta-bcd'], + MaxAgeInSeconds: 500, + }, 
+ { + AllowedOrigins: ['www.msdn.com', 'www.asp.com'], + AllowedMethods: ['GET', 'PUT'], + AllowedHeaders: ['x-ms-meta-data*', 'x-ms-meta-target*', 'x-ms-meta-xyz', 'x-ms-meta-foo'], + ExposedHeaders: ['x-ms-meta-data*', 'x-ms-meta-source*', 'x-ms-meta-abc', 'x-ms-meta-bcd'], + MaxAgeInSeconds: 500, + }, + ], + }, + }; +} +``` + +When modifying the service properties, you can fetch the properties and then modify them to prevent overwriting the existing settings. + +```Javascript +var azure = require('azure-storage'); +var blobService = azure.createBlobService(); + +blobService.getServiceProperties(function(error, result, response) { + if (!error) { + var serviceProperties = result; + + // modify the properties + + blobService.setServiceProperties(serviceProperties, function(error, result, response) { + if (!error) { + // properties are set + } + }); + } +}); +``` + +### Retry Policies + +By default, no retry will be performed with service instances newly created by the Azure storage client library for Node.js. +Two pre-written retry policies, [ExponentialRetryPolicyFilter](http://azure.github.io/azure-storage-node/ExponentialRetryPolicyFilter.html) and [LinearRetryPolicyFilter](http://azure.github.io/azure-storage-node/LinearRetryPolicyFilter.html), are available with modifiable settings, and can be used by associating the filter. +Any custom retry logic may be used by customizing a RetryPolicyFilter instance. + +For how to use the pre-written retry policies and how to define a customized retry policy, please refer to **retrypolicysample** in the samples directory. + +## Code Samples + +How-Tos focused around accomplishing specific tasks are available on the [Microsoft Azure Node.js Developer Center](http://azure.microsoft.com/en-us/develop/nodejs/). 
+ +* [How to use the Blob Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-blob-storage/) + +* [How to use the Table Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-table-storage/) + +* [How to use the Queue Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-queues/) + +# Running Tests + +Unit tests can be run from the module's root directory using: + +```shell +npm test +``` + +Running tests via Grunt is also supported: + +```shell +grunt # mochaTest as the default task +``` + +By default the unit tests are run with Nock recording data. To run tests against a real storage account, set the following environment variable to turn off Nock: + +``` +set NOCK_OFF=true +``` + +and set up the following environment variable for the storage account credentials: + +```Batchfile +set AZURE_STORAGE_CONNECTION_STRING="valid storage connection string" +``` + +To record the data in a test pass against a real storage account for future Nock usage: + +```Batchfile +set AZURE_NOCK_RECORD=true +``` + +In order to be able to use a proxy like Fiddler, an additional environment variable should be set up: + +```Batchfile +set NODE_TLS_REJECT_UNAUTHORIZED=0 +set HTTP_PROXY=http://127.0.0.1:8888 +``` + +On Linux, please use `export` instead of `set` to set the variables. + +# Azure Storage JavaScript Client Library for Browsers + +The Azure Storage Node.js Client Library is compatible with [Browserify](http://browserify.org/). This means you can bundle your Node.js application which depends on the Node.js Client Library using Browserify. + +You can also choose to download the JavaScript Client Library provided by us, or generate the library yourself. Please refer to the [README.md](https://github.com/Azure/azure-storage-node/blob/master/browser/README.md) under the `browser` folder for detailed usage guidelines. 
+ +## Downloading Azure Storage JavaScript Client Library + +It's recommended to use the Azure Storage JavaScript Client Library provided by us. Please [download the latest library](https://aka.ms/downloadazurestoragejs). + +## Generating Azure Storage JavaScript Client Library + +We also provide browserify bundle scripts which generate Azure Storage JavaScript Client Library. The bundle script reduces the size of the Storage Client Library by splitting into smaller files, one per storage service. For more detailed information, refer to [README.md](https://github.com/Azure/azure-storage-node/blob/master/browser/README.md) under `browser` folder. + +# JsDoc + +JsDoc can be generated by `grunt jsdoc`. + +To load the docs by devserver after generation, run `grunt doc` and then browse the docs at [http://localhost:8888](http://localhost:8888). + +# Need Help? + +Be sure to check out the Microsoft Azure [Developer Forums on MSDN](http://go.microsoft.com/fwlink/?LinkId=234489) if you have trouble with the provided code or use StackOverflow. + +# Learn More + +- [Microsoft Azure Node.js Developer Center](http://azure.microsoft.com/en-us/develop/nodejs/) +- [Azure Storage Team Blog](http://blogs.msdn.com/b/windowsazurestorage/) + +# Contribute + +We gladly accept community contributions. + +- Issues: Please report bugs using the Issues section of GitHub +- Forums: Interact with the development teams on StackOverflow or the Microsoft Azure Forums +- Source Code Contributions: If you would like to become an active contributor to this project please follow the instructions provided in [Contributing.md](CONTRIBUTING.md). + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
+ +For general suggestions about Microsoft Azure please use our [UserVoice forum](http://feedback.azure.com/forums/34192--general-feedback). diff --git a/src/node_modules/azure-storage/browser/ChangeLog.md b/src/node_modules/azure-storage/browser/ChangeLog.md new file mode 100644 index 0000000..252b00d --- /dev/null +++ b/src/node_modules/azure-storage/browser/ChangeLog.md @@ -0,0 +1,140 @@ +Note: This is the change log file for Azure Storage JavaScript Client Library. + +2019.04 Version 2.10.103 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.10.3. +* Fixed callback not being called in _getBlobToLocalFile. +* Removed retryInfo.retryable check in retrypolicyfilter.js. +* Removed comment about maxResults. +* Fixed Travis-CI failed validation. +* Updated latest links and descriptions to V10 SDK in readme.md. + +2018.10 Version 2.10.102 + +ALL +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.10.2. +* Optimized browser samples and other documents. +* Added JSv10 link and docs.microsoft.com link. + +FILE +* Fixed an issue that empty text isn’t supported in `createFileFromText`. + +2018.08 Version 2.10.101 + +ALL +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.10.1. +* Fixed a bug that content type value is incorrect for json. +* Fixed an issue that user agent is set in browser environment. + +2018.06 Version 2.10.100 + +ALL +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.10.0. +* Updated storage service version to 2018-03-28. + +BLOB +* Fixed a bug that `DeleteRetentionPolicy.Days` should be `number` instead of `string` when calling `getServiceProperties`. +* Added a method `getAccountProperties` to `blobService`. +* Added a method `createBlockFromURL` to `blobService`. +* Added support for static website service properties (in preview). 
+ +2018.05 Version 2.9.100-preview + +ALL +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.9.0-preview. +* Updated storage service version to 2017-11-09. +* Added `/* eslint-disable */` to generated JS files to avoid eslint warnings when using with create-react-app. +* Added `progress` event for `SpeedSummary` class, which will be triggered when every progress updates. + +BLOB +* Added `createBlobServiceWithTokenCredential()` to create `BlobService` object with bearer tokens such as OAuth access token (in preview). +* Added support for '$web' as a valid blob container name for static website. +* Added support for write-once read-many containers (in preview). +* The `Get Container Properties` and `List Containers` APIs now return two new properties indicating whether the container has an immutability policy or a legal hold. +* The `Get Blob Properties` and `List Blobs` APIs now return the creation time of the blob as a property. + +QUEUE +* Added `createQueueServiceWithTokenCredential()` to create `QueueService` object with bearer tokens such as OAuth access token (in preview). + +2018.04 Version 2.8.100 + +* Fixed a bug that retry policy will not retry for XHR error in browsers. +* Updated README.md under browser folder to make it more clear about the zip file downloading link. +* Updated github.io API reference title to include JavaScript. +* Updated local HTTP server requirements for IE11 and Chrome 56 in samples and documents. +* Added support for running UT/FT in browsers like Chrome based on Karma, with command `npm run jstest`. +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.8.2. + +2018.03 Version 0.2.8-preview.15 + +* Supported UMD module standard. +* Dropped `azure-storage.common.js`. +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.8.1. 
+ +2018.02 Version 0.2.8-preview.14 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.8.0. + +2017.12 Version 0.2.7-preview.13 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.7.0. + +2017.10 Version 0.2.6-preview.12 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.6.0. + +2017.09 Version 0.2.5-preview.11 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.5.0. + +2017.08 Version 0.2.4-preview.10 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.4.0. + +2017.08 Version 0.2.3-preview.9 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.3.0. + +2017.08 Version 0.2.2-preview.8 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.2.2. + +2017.07 Version 0.2.2-preview.7 + +* Added browser specific APIs for blobs and files uploading. + * `BlobService.createBlockBlobFromBrowserFile` + * `BlobService.createPageBlobFromBrowserFile` + * `BlobService.createAppendBlobFromBrowserFile` + * `BlobService.appendFromBrowserFile` + * `FileService.createFileFromBrowserFile` +* Updated samples with above new added APIs. +* Dropped dependency to browserify-fs. + +2017.07 Version 0.2.2-preview.6 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.2.1. + +2017.06 Version 0.2.2-preview.5 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.2.0. + +2017.05 Version 0.2.1-preview.4 + +* Reduced footprint of the generated JavaScript files. +* Removed 7 local-file related APIs which are limited by browser's sandbox. 
+ +2017.03 Version 0.2.1-preview.3 + +* Fixed missing 100% upload progress issue in blob sample for uploading blobs smaller than 32MB. +* Added speedSummary code example in the blob & file samples. + +2017.03 Version 0.2.1-preview.2 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.1.0. + +2017.03 Version 0.2.0-preview.1 + +* Generated browser compatible JavaScript files based on Microsoft Azure Storage SDK for Node.js 2.0.0. +* Added bundle scripts to generate Azure Storage JavaScript Client Library. +* Added npm command `npm run genjs` to generate JavaScript Client Library. +* Added samples for Azure Storage JavaScript Client Library. \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/README.md b/src/node_modules/azure-storage/browser/README.md new file mode 100644 index 0000000..f6bc1c8 --- /dev/null +++ b/src/node_modules/azure-storage/browser/README.md @@ -0,0 +1,109 @@ +# Azure Storage JavaScript Client Library for Browsers + +## Downloading + +It's recommended to use the Azure Storage JavaScript Client Library provided by us. Please [download the latest library](https://aka.ms/downloadazurestoragejs). + +There are 8 generated JavaScript files for Azure Storage JavaScript Client Library: +- `azure-storage.blob.js` and `azure-storage.blob.min.js` contain the Azure Storage blob service operation logic +- `azure-storage.table.js` and `azure-storage.table.min.js` contain the Azure Storage table service operation logic +- `azure-storage.queue.js` and `azure-storage.queue.min.js` contain the Azure Storage queue service operation logic +- `azure-storage.file.js` and `azure-storage.file.min.js` contain the Azure Storage file service operation logic + +We also provide samples to guide you quickly start with the Azure Storage JavaScript Client Library. 
In the [JavaScript Client Library zip file](https://aka.ms/downloadazurestoragejs) or following online links, you will find 4 HTML samples: +- [sample-blob.html](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-blob.html) demonstrates how to operate with Azure Storage blob service in the browser +- [sample-table.html](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-table.html) demonstrates how to operate with Azure Storage table service in the browser +- [sample-queue.html](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-queue.html) demonstrates how to operate with Azure Storage queue service in the browser +- [sample-file.html](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-file.html) demonstrates how to operate with Azure Storage file service in the browser + +After generating the JavaScript Client Library, you can try the samples in browsers such as Chrome/Edge/Firefox directly. + +**Note**: An HTTP server should be set to host the samples for IE11 and Chrome (56 or newer versions). + +Or you can directly try with following online samples: +- [sample-blob](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-blob.html) +- [sample-table](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-table.html) +- [sample-queue](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-queue.html) +- [sample-file](https://dmrelease.blob.core.windows.net/azurestoragejssample/samples/sample-file.html) + +## Module Support + +Above JavaScript files are all [UMD compatible](https://github.com/umdjs/umd). You can load them in a CommonJS or AMD environment by JavaScript module loaders. 
If no module system is found, following global variables will be set: +- `AzureStorage.Blob` +- `AzureStorage.Table` +- `AzureStorage.Queue` +- `AzureStorage.File` + +## Compatibility + +Compatibility with mobile browsers have not been fully validated, please open issues when you get errors. + +# Running Tests against Browsers + +Running tests against Chrome by default. The Storage Account should be configured with CORS support before running test. Please see above online samples about how to configure CORS rules for an account. + +``` +set AZURE_STORAGE_CONNECTION_STRING="valid storage connection string" +npm install +npm run jstest +``` + +## Generating a Custom Azure Storage JavaScript Client Library + +If you wish to customize the library and generate the Azure Storage JavaScript Client Library, you can follow the following steps. + +We provide browserify bundle scripts which generate Azure Storage JavaScript Client Library. The bundle script reduces the size of the Storage Client Library by splitting into smaller files, one per storage service. + +The generated JavaScript Client Library includes 8 separated JavaScript files: +- `azure-storage.blob.js` +- `azure-storage.table.js` +- `azure-storage.queue.js` +- `azure-storage.file.js` +- `azure-storage.blob.min.js` +- `azure-storage.table.min.js` +- `azure-storage.queue.min.js` +- `azure-storage.file.min.js` + +Let's get started to generate the Azure Storage JavaScript Client Library! + +### Step 1: Cloning Repo + +Azure Storage JavaScript Client Library is generated from Azure Storage SDK for Node.js. 
Clone `azure-storage-node` repo with following command: + +```Batchfile +git clone https://github.com/Azure/azure-storage-node.git +``` + +### Step 2: Installing Node.js Modules + +Change to the root directory of the cloned repo: + +```Batchfile +cd azure-storage-node +``` + +Install the dependent Node.js modules: + +```Batchfile +npm install +``` + +### Step 3: Generating JavaScript Client Library with Bundle Scripts + +We provide bundle scripts to help quickly generate the JavaScript Client Library. At the root directory of the cloned repo: + +```Batchfile +npm run genjs [VERSION_NUMBER] +``` + +### Step 4: Finding the Generated JavaScript Files + +If everything goes well, the generated JavaScript files should be saved to `azure-storage-node/browser/bundle`. There will be 8 generated JavaScript files totally: +- `azure-storage.blob.js` +- `azure-storage.table.js` +- `azure-storage.queue.js` +- `azure-storage.file.js` +- `azure-storage.blob.min.js` +- `azure-storage.table.min.js` +- `azure-storage.queue.min.js` +- `azure-storage.file.min.js` \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/azure-storage.blob.export.js b/src/node_modules/azure-storage/browser/azure-storage.blob.export.js new file mode 100644 index 0000000..3bd7a28 --- /dev/null +++ b/src/node_modules/azure-storage/browser/azure-storage.blob.export.js @@ -0,0 +1,74 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +module.exports.generateDevelopmentStorageCredentials = function (proxyUri) { + var devStore = 'UseDevelopmentStorage=true;'; + if(proxyUri){ + devStore += 'DevelopmentStorageProxyUri=' + proxyUri; + } + + return devStore; +}; + +var BlobService = require('../lib/services/blob/blobservice.browser'); + +module.exports.BlobService = BlobService; +module.exports.BlobUtilities = require('../lib/services/blob/blobutilities'); + +module.exports.createBlobService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new BlobService(storageAccountOrConnectionString, storageAccessKey, host, null); +}; + +module.exports.createBlobServiceWithSas = function (host, sasToken) { + return new BlobService(null, null, host, sasToken); +}; + +module.exports.createBlobServiceWithTokenCredential = function (host, tokenCredential) { + return new BlobService(null, null, host, null, null, tokenCredential); +}; + +module.exports.createBlobServiceAnonymous = function (host) { + return new BlobService(null, null, host, null); +}; + +var azureCommon = require('../lib/common/common.browser'); +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKey = azureCommon.SharedKey; + +module.exports.generateAccountSharedAccessSignature = function(storageAccountOrConnectionString, storageAccessKey, sharedAccessAccountPolicy) +{ + var storageSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey); + var sharedKey = new SharedKey(storageSettings._name, storageSettings._key); + + return sharedKey.generateAccountSignedQueryString(sharedAccessAccountPolicy); +}; + +module.exports.Constants = azureCommon.Constants; +module.exports.StorageUtilities = azureCommon.StorageUtilities; +module.exports.AccessCondition = azureCommon.AccessCondition; + +module.exports.SR = azureCommon.SR; 
+module.exports.StorageServiceClient = StorageServiceClient; +module.exports.Logger = azureCommon.Logger; +module.exports.WebResource = azureCommon.WebResource; +module.exports.Validate = azureCommon.validate; +module.exports.date = azureCommon.date; +module.exports.TokenCredential = azureCommon.TokenCredential; + +// Other filters +module.exports.LinearRetryPolicyFilter = azureCommon.LinearRetryPolicyFilter; +module.exports.ExponentialRetryPolicyFilter = azureCommon.ExponentialRetryPolicyFilter; +module.exports.RetryPolicyFilter = azureCommon.RetryPolicyFilter; \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/azure-storage.file.export.js b/src/node_modules/azure-storage/browser/azure-storage.file.export.js new file mode 100644 index 0000000..61e7226 --- /dev/null +++ b/src/node_modules/azure-storage/browser/azure-storage.file.export.js @@ -0,0 +1,65 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +module.exports.generateDevelopmentStorageCredentials = function (proxyUri) { + var devStore = 'UseDevelopmentStorage=true;'; + if(proxyUri){ + devStore += 'DevelopmentStorageProxyUri=' + proxyUri; + } + + return devStore; +}; + +var FileService = require('../lib/services/file/fileservice.browser'); + +module.exports.FileService = FileService; +module.exports.FileUtilities = require('../lib/services/file/fileutilities'); + +module.exports.createFileService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new FileService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +module.exports.createFileServiceWithSas = function (hostUri, sasToken) { + return new FileService(null, null, hostUri, sasToken); +}; + +var azureCommon = require('../lib/common/common.browser'); +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKey = azureCommon.SharedKey; + +module.exports.generateAccountSharedAccessSignature = function(storageAccountOrConnectionString, storageAccessKey, sharedAccessAccountPolicy) +{ + var storageSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey); + var sharedKey = new SharedKey(storageSettings._name, storageSettings._key); + + return sharedKey.generateAccountSignedQueryString(sharedAccessAccountPolicy); +}; + +module.exports.Constants = azureCommon.Constants; +module.exports.StorageUtilities = azureCommon.StorageUtilities; +module.exports.AccessCondition = azureCommon.AccessCondition; + +module.exports.SR = azureCommon.SR; +module.exports.StorageServiceClient = StorageServiceClient; +module.exports.Logger = azureCommon.Logger; +module.exports.WebResource = azureCommon.WebResource; +module.exports.Validate = azureCommon.validate; +module.exports.date = azureCommon.date; + +// Other filters +module.exports.LinearRetryPolicyFilter = azureCommon.LinearRetryPolicyFilter; +module.exports.ExponentialRetryPolicyFilter = 
azureCommon.ExponentialRetryPolicyFilter; +module.exports.RetryPolicyFilter = azureCommon.RetryPolicyFilter; \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/azure-storage.queue.export.js b/src/node_modules/azure-storage/browser/azure-storage.queue.export.js new file mode 100644 index 0000000..45726ee --- /dev/null +++ b/src/node_modules/azure-storage/browser/azure-storage.queue.export.js @@ -0,0 +1,71 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +module.exports.generateDevelopmentStorageCredentials = function (proxyUri) { + var devStore = 'UseDevelopmentStorage=true;'; + if(proxyUri){ + devStore += 'DevelopmentStorageProxyUri=' + proxyUri; + } + + return devStore; +}; + +var QueueService = require('../lib/services/queue/queueservice'); + +module.exports.QueueService = QueueService; +module.exports.QueueUtilities = require('../lib/services/queue/queueutilities'); +module.exports.QueueMessageEncoder = require('../lib/services/queue/queuemessageencoder'); + +module.exports.createQueueService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new QueueService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +module.exports.createQueueServiceWithSas = function(hostUri, sasToken) { + return new QueueService(null, null, hostUri, sasToken); +}; + +module.exports.createQueueServiceWithTokenCredential = function (host, tokenCredential) { + return new QueueService(null, null, host, null, null, tokenCredential); +}; + +var azureCommon = require('../lib/common/common.browser'); +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKey = azureCommon.SharedKey; + +module.exports.generateAccountSharedAccessSignature = function(storageAccountOrConnectionString, storageAccessKey, sharedAccessAccountPolicy) +{ + var storageSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey); + var sharedKey = new SharedKey(storageSettings._name, storageSettings._key); + + return sharedKey.generateAccountSignedQueryString(sharedAccessAccountPolicy); +}; + +module.exports.Constants = azureCommon.Constants; +module.exports.StorageUtilities = azureCommon.StorageUtilities; +module.exports.AccessCondition = azureCommon.AccessCondition; + +module.exports.SR = azureCommon.SR; +module.exports.StorageServiceClient = StorageServiceClient; +module.exports.Logger = azureCommon.Logger; +module.exports.WebResource = 
azureCommon.WebResource; +module.exports.Validate = azureCommon.validate; +module.exports.date = azureCommon.date; +module.exports.TokenCredential = azureCommon.TokenCredential; + +// Other filters +module.exports.LinearRetryPolicyFilter = azureCommon.LinearRetryPolicyFilter; +module.exports.ExponentialRetryPolicyFilter = azureCommon.ExponentialRetryPolicyFilter; +module.exports.RetryPolicyFilter = azureCommon.RetryPolicyFilter; \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/azure-storage.table.export.js b/src/node_modules/azure-storage/browser/azure-storage.table.export.js new file mode 100644 index 0000000..9a93dd2 --- /dev/null +++ b/src/node_modules/azure-storage/browser/azure-storage.table.export.js @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +module.exports.generateDevelopmentStorageCredentials = function (proxyUri) { + var devStore = 'UseDevelopmentStorage=true;'; + if(proxyUri){ + devStore += 'DevelopmentStorageProxyUri=' + proxyUri; + } + + return devStore; +}; + +var TableService = require('../lib/services/table/tableservice'); +module.exports.TableService = TableService; +module.exports.TableQuery = require('../lib/services/table/tablequery'); +module.exports.TableBatch = require('../lib/services/table/tablebatch'); +module.exports.TableUtilities = require('../lib/services/table/tableutilities'); + +module.exports.createTableService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new TableService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +module.exports.createTableServiceWithSas = function (hostUri, sasToken) { + return new TableService(null, null, hostUri, sasToken); +}; + +var azureCommon = require('../lib/common/common.browser'); +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKey = azureCommon.SharedKey; + +module.exports.generateAccountSharedAccessSignature = function(storageAccountOrConnectionString, storageAccessKey, sharedAccessAccountPolicy) +{ + var storageSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey); + var sharedKey = new SharedKey(storageSettings._name, storageSettings._key); + + return sharedKey.generateAccountSignedQueryString(sharedAccessAccountPolicy); +}; + +module.exports.Constants = azureCommon.Constants; +module.exports.StorageUtilities = azureCommon.StorageUtilities; +module.exports.AccessCondition = azureCommon.AccessCondition; + +module.exports.SR = azureCommon.SR; +module.exports.StorageServiceClient = StorageServiceClient; +module.exports.Logger = azureCommon.Logger; +module.exports.WebResource = azureCommon.WebResource; +module.exports.Validate = azureCommon.validate; +module.exports.date = azureCommon.date; + +// Other filters 
+module.exports.LinearRetryPolicyFilter = azureCommon.LinearRetryPolicyFilter; +module.exports.ExponentialRetryPolicyFilter = azureCommon.ExponentialRetryPolicyFilter; +module.exports.RetryPolicyFilter = azureCommon.RetryPolicyFilter; \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/bundle.js b/src/node_modules/azure-storage/browser/bundle.js new file mode 100644 index 0000000..4572ca7 --- /dev/null +++ b/src/node_modules/azure-storage/browser/bundle.js @@ -0,0 +1,77 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var browserify = require('browserify'); +var fs = require('fs'); +var path = require('path'); +var UglifyJS = require('uglify-js'); + +var version = process.argv[2] || process.env.AZURE_STORAGE_JAVASCRIPT_VERSION || ''; +var license = [ + '// Azure Storage JavaScript Client Library ' + version, + '// Copyright (c) Microsoft and contributors. 
All rights reserved.', + '/* eslint-disable */' +].join('\n') + '\n'; + +var outputFolder = 'bundle'; +var outputFolderPath = path.resolve(__dirname, outputFolder); + +console.log('Generating Azure Storage JavaScript Client Library to ' + outputFolderPath + ' ...'); + +if (version === '') { + console.warn( + 'No version number provided.', + 'You can set up a version number by first parameter of bundle.js or environment value AZURE_STORAGE_JAVASCRIPT_VERSION' + ); +} + +if (!fs.existsSync(outputFolderPath)) { + fs.mkdirSync(outputFolderPath); +} + +function build(exportFilePath, outputFilePath, moduleName, isMinify) { + browserify(exportFilePath, {standalone: moduleName}).bundle(function (err, src) { + if (err) { + console.error('Failed when parsing', exportFilePath, err); + return; + } + + var code = (src || '').toString(); + if (isMinify) { + result = UglifyJS.minify(code.trim()); + if (result.error) { + console.error('Minify failed when parsing', exportFilePath, err); + return; + } + + code = result.code; + } + + var ws = fs.createWriteStream(outputFilePath); + ws.write(license); + ws.write(code); + ws.end(); + }); +} + +build(path.resolve(__dirname, 'azure-storage.blob.export.js'), path.resolve(outputFolderPath, 'azure-storage.blob.js'), 'AzureStorage.Blob'); +build(path.resolve(__dirname, 'azure-storage.table.export.js'), path.resolve(outputFolderPath, 'azure-storage.table.js'), 'AzureStorage.Table'); +build(path.resolve(__dirname, 'azure-storage.queue.export.js'), path.resolve(outputFolderPath, 'azure-storage.queue.js'), 'AzureStorage.Queue'); +build(path.resolve(__dirname, 'azure-storage.file.export.js'), path.resolve(outputFolderPath, 'azure-storage.file.js'), 'AzureStorage.File'); +build(path.resolve(__dirname, 'azure-storage.blob.export.js'), path.resolve(outputFolderPath, 'azure-storage.blob.min.js'), 'AzureStorage.Blob', true); +build(path.resolve(__dirname, 'azure-storage.table.export.js'), path.resolve(outputFolderPath, 'azure-storage.table.min.js'), 
'AzureStorage.Table', true); +build(path.resolve(__dirname, 'azure-storage.queue.export.js'), path.resolve(outputFolderPath, 'azure-storage.queue.min.js'), 'AzureStorage.Queue', true); +build(path.resolve(__dirname, 'azure-storage.file.export.js'), path.resolve(outputFolderPath, 'azure-storage.file.min.js'), 'AzureStorage.File', true); \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/samples/cors.PNG b/src/node_modules/azure-storage/browser/samples/cors.PNG new file mode 100644 index 0000000..4f6a97f Binary files /dev/null and b/src/node_modules/azure-storage/browser/samples/cors.PNG differ diff --git a/src/node_modules/azure-storage/browser/samples/sample-blob.html b/src/node_modules/azure-storage/browser/samples/sample-blob.html new file mode 100644 index 0000000..43364e4 --- /dev/null +++ b/src/node_modules/azure-storage/browser/samples/sample-blob.html @@ -0,0 +1,498 @@ + + + + + + Azure Storage JavaScript Client Library Sample for Blob Operations + + + +
+
+

Azure Storage JavaScript Client Library Sample for Blob Operations

+
+

In this sample, we will demonstrate common scenarios for Azure Blob Storage that includes creating, listing and deleting containers and blobs.

+
+

Azure Blob storage is a service for storing large amounts of unstructured object data, such as text or binary data, that can be accessed from anywhere in the world via HTTP or HTTPS. You can use Blob storage to expose data publicly to the world, or to store application data privately.

+ +
+
+ Note: You may need to set up an HTTP server to host this sample for IE11 and the latest Chrome. +
+
+ +

Contents:

+ + +

Step 1: Preparing an Azure Storage account with CORS rules set

+

Cross-origin resource sharing, or CORS, must be configured on the Azure Storage account to be accessed directly from JavaScript in the browser. + You are able to set the CORS rules for a specific Azure Storage account on the Azure Portal. + The "Allowed origins" could be set to "*" to allow all the origins in this sample. + For more information about CORS, see Cross-Origin Resource Sharing (CORS).

+ + +

Step 2: Importing Azure Storage JavaScript Client Library

+

+ Importing azure-storage.blob.js in your HTML file for blob operations. +

+

+<script src="azure-storage.blob.js"></script>
+
+ +

Step 3: Creating an Azure Storage Blob Service Object

+

+ The BlobService object lets you work with containers and blobs. + Following code creates a BlobService object with storage account and SAS Token. +

+
+var blobUri = 'https://' + 'STORAGE_ACCOUNT' + '.blob.core.windows.net';
+var blobService = AzureStorage.Blob.createBlobServiceWithSas(blobUri, 'SAS_TOKEN');
+
+

+ You can load Azure Storage JavaScript Client Library in a CommonJS or AMD environment by JavaScript module loaders. If no module system is found, global variable AzureStorage.Blob will be set, which is the starting point where we can create service objects for blob and access the storage utilities. +

+
+
+ How to get full detailed API definitions? Currently, the JavaScript Client Library shares almost the same API definitions with Node.js SDK, besides Node.js runtime specific APIs. + Please check API details on Azure Storage API reference documents. The JavaScript global variable AzureStorage.Blob is just like the object require('azure-storage') returns in Node.js, but limits to Blob related interfaces. + Go to BlobService to view possible methods provided by BlobService class. +
+
+
+
+ Warning: Azure Storage JavaScript Client Library also supports creating BlobService based on Storage Account Key for authentication besides SAS Token. + However, for security concerns, we recommend use of a limited time SAS Token, generated by a backend web server using a Stored Access Policy. +
+
+ +

Step 4: Container Operations

+

+ A container provides a grouping of blobs. All blobs must be in a container. An account can contain an unlimited number of containers. A container can store an unlimited number of blobs. Note that the container name must be lowercase. + BlobService object provides plenty of interfaces for container operations. +

+ +

List Containers

+

BlobService provides listContainersSegmented and listContainersSegmentedWithPrefix for retrieving the containers list under a storage account.

+
+blobService.listContainersSegmented(null, function (error, results) {
+    if (error) {
+        // List container error
+    } else {
+        for (var i = 0, container; container = results.entries[i]; i++) {
+            // Deal with container object
+        }
+    }
+});
+
+ +

Create Container

+

BlobService provides createContainer and createContainerIfNotExists for creating a container under a storage account.

+
+blobService.createContainerIfNotExists('mycontainer', function(error, result) {
+    if (error) {
+        // Create container error
+    } else {
+        // Create container successfully
+    }
+});
+
+ +

Delete Container

+

BlobService provides deleteContainer and deleteContainerIfExists for deleting a container under a storage account.

+
+blobService.deleteContainerIfExists('mycontainer', function(error, result) {
+    if (error) {
+        // Delete container error
+    } else {
+        // Delete container successfully
+    }
+});
+
+ +

Executable Example

+

+ The sample will try to create an Azure Storage blob service object based on SAS Token authorization. + Enter your Azure Storage account name and SAS Token here, and the executable examples in the following steps depend on the settings here. + Make sure you have set the CORS rules for the Azure Storage blob service, and that the SAS Token is in its valid period. +

+

+ + +

+

In the following executable example, you can try to list all the containers under your storage account settings, and try to create or delete one container from your account.

+
    +
  • Click button to view the container list under your Azure Storage account

  • +
  • +

    Click button to create a container under your Azure Storage account:

    +

    +
  • +
  • Click "Delete" button in the container list to delete the container under your Azure Storage account

  • +
  • Click "Select" button to select and operate with the blobs in next step

  • +
+
+ +

Step 5: Blob Operations

+

Blob: A file of any type and size. Azure Storage offers three types of blobs: block blobs, page blobs, and append blobs.

+

Block blobs are ideal for storing text or binary files, such as documents and media files. Append blobs are similar to block blobs in that they are made up of blocks, but they are optimized for append operations, so they are useful for logging scenarios. A single block blob can contain up to 50,000 blocks of up to 100 MB each, for a total size of slightly more than 4.75 TB (100 MB X 50,000). A single append blob can contain up to 50,000 blocks of up to 4 MB each, for a total size of slightly more than 195 GB (4 MB X 50,000).

+

Page blobs can be up to 1 TB in size, and are more efficient for frequent read/write operations. Azure Virtual Machines use page blobs as OS and data disks.

+

For details about naming containers and blobs, see Naming and Referencing Containers, Blobs, and Metadata.

+ +

List Blobs

+

BlobService provides listBlobsSegmented and listBlobsSegmentedWithPrefix for retrieving the blobs list under a container.

+
+blobService.listBlobsSegmented('mycontainer', null, function (error, results) {
+    if (error) {
+        // List blobs error
+    } else {
+        for (var i = 0, blob; blob = results.entries[i]; i++) {
+            // Deal with blob object
+        }
+    }
+});
+
+

Upload Blob

+

BlobService provides createBlockBlobFromBrowserFile, createPageBlobFromBrowserFile, createAppendBlobFromBrowserFile and appendFromBrowserFile for uploading or appending a blob from an HTML file in browsers. +

+ +

Uploading blob from stream. You can set up the blob name as well as the size of this uploading session.

+
+// If one file has been selected in the HTML file input element
+var file = document.getElementById('fileinput').files[0];
+
+var customBlockSize = file.size > 1024 * 1024 * 32 ? 1024 * 1024 * 4 : 1024 * 512;
+blobService.singleBlobPutThresholdInBytes = customBlockSize;
+
+var finishedOrError = false;
+var speedSummary = blobService.createBlockBlobFromBrowserFile('mycontainer', file.name, file, {blockSize : customBlockSize}, function(error, result, response) {
+    finishedOrError = true;
+    if (error) {
+        // Upload blob failed
+    } else {
+        // Upload successfully
+    }
+});
+refreshProgress();
+
+

Checking the upload progress with speedSummary object.

+
+speedSummary.on('progress', function () {
+    var process = speedSummary.getCompletePercent();
+    displayProcess(process);
+});
+
+
+
+ Warning: + By default, the speedSummary.getCompletePercent() only updates progress when a block is uploaded to server. There are 2 default settings that may influence the upload progress display. + +
    +
  • blobService.singleBlobPutThresholdInBytes is the maximum size (default 32MB), in bytes, of a blob before it must be separated into blocks.
  • +
  • Option {blockSize: SizeInBytes} of blobService.createBlockBlobFromBrowserFile() is the size (default 4MB) of every block in the storage layer.
  • +
+ + This means, by default, blobs smaller than 32MB will only get the progress update when the upload is over, and blobs larger than 32MB will update the progress every 4MB. + For slow connections or progress reporting for small blobs, you can customize both of these settings into smaller values such as 1MB or 512KB. Thus the progress will update with the smaller step you set. + However, very small block sizes will impact the storage performance especially for a large blob. +
+
+ +

Download Blob

+

+ BlobService provides interfaces for downloading a blob into browser memory. + Because of the browser's sandbox limitation, we cannot save the downloaded data chunks to disk until we get all the data chunks of a blob into browser memory. + The browser's memory size is also limited especially for downloading huge blobs, so it's recommended to download a blob in the browser with a SAS Token authorized link directly. +

+

+ Shared access signatures (SAS) are a secure way to provide granular access to blobs and containers without providing your storage account name or keys. Shared access signatures are often used to provide limited access to your data, such as allowing a mobile app to access blobs. + The following code example generates a new shared access policy that allows the shared access signatures holder to perform read operations on the myblob blob, and expires 100 minutes after the time it is created. +

+
+
+ Note: You can choose to use the SAS Token in browser side, or generate a temporary SAS Token dynamically in your server side with Azure Storage C# or Node.js SDKs etc. according to your security requirements. +
+
+
+var downloadLink = blobService.getUrl('mycontainer', 'myblob', 'SAS_TOKEN');
+
+ +

Delete Blob

+

BlobService provides deleteBlob and deleteBlobIfExists for deleting a blob under a container.

+
+blobService.deleteBlobIfExists(container, blob, function(error, result) {
+    if (error) {
+        // Delete blob failed
+    } else {
+        // Delete blob successfully
+    }
+});
+
+

Executable Example

+

After clicking the "Select" button on the container list in the last step, you are able to operate with the blobs under the selected container.

+

+ +
    +
  • Click button to view the blobs under your selected container

  • +
  • + Click button to upload a local file to current container after selecting a file: +

    +

  • +
  • Click "Delete" button to delete the blob

  • +
  • Click "Download" link to download a blob to local

  • +
+ +
+
Uploaded Bytes:
+
+
+ 0% +
+
+ +
+ +

Step 6: Creating your JavaScript Application based on Azure Storage JavaScript Client Library

+
    +
  1. Setting CORS rules for your selected Azure-Storage account blob service.
  2. +
  3. Including functional file(s) needed, such as "azure-storage.blob.js" for blob operation.
  4. +
  5. Using keyword "AzureStorage.Blob" to access to Azure storage JavaScript APIs for blobs.
  6. +
  7. Referring to API documents for detailed API definitions.
  8. +
+

You can view the source code of this sample for detailed reference.

+
+ + + + + + diff --git a/src/node_modules/azure-storage/browser/samples/sample-file.html b/src/node_modules/azure-storage/browser/samples/sample-file.html new file mode 100644 index 0000000..2125cc3 --- /dev/null +++ b/src/node_modules/azure-storage/browser/samples/sample-file.html @@ -0,0 +1,575 @@ + + + + + + Azure Storage JavaScript Client Library Sample for File Operations + + + +
+
+

Azure Storage JavaScript Client Library Sample for File Operations

+
+

In this sample, we will demonstrate common scenarios for Azure File Storage that includes creating, listing and deleting file shares, directories and files.

+
+

Azure File storage is a service for storing large amounts of unstructured object data, such as text or binary data, that can be accessed from anywhere in the world via HTTP or HTTPS. You can use file storage to expose data publicly to the world, or to store application data privately.

+

With Azure File storage, you can migrate legacy applications that rely on file shares to Azure quickly and without costly rewrites. Applications running in Azure virtual machines or cloud services or from on-premises clients can mount a file share in the cloud, just as a desktop application mounts a typical SMB share. Any number of application components can then mount and access the File storage share simultaneously. In this sample, you are able to create a file service with storage account and SAS Token. Based on the file service, you could create a file share, list files, upload files and delete files.

+ +
+
+ Note: You may need to set up an HTTP server to host this sample for IE11 and the latest Chrome. +
+
+ +

Contents:

+ + +

Step 1: Preparing an Azure Storage account with CORS rules set

+

Cross-origin resource sharing, or CORS, must be configured on the Azure Storage account to be accessed directly from JavaScript in the browser. + You are able to set the CORS rules for specific Azure Storage account on the Azure Portal. + The "Allowed origins" could be set to "*" to allow all the origins in this sample. + For more information about CORS, see Cross-Origin Resource Sharing (CORS).

+ + +

Step 2: Importing Azure Storage JavaScript Files

+

+ Importing azure-storage.file.js in your HTML file for file operations. +

+

+<script src="azure-storage.file.js"></script>
+
+ +

Step 3: Creating an Azure Storage File Service Object

+

+ The FileService object lets you work with files and directories. + Following code creates a FileService object with storage account and SAS Token. +

+
+var fileUri = 'https://' + 'STORAGE_ACCOUNT' + '.file.core.windows.net';
+var fileService = AzureStorage.File.createFileServiceWithSas(fileUri, 'SAS_TOKEN');
+
+

+ You can load Azure Storage JavaScript Client Library in a CommonJS or AMD environment by JavaScript module loaders. If no module system is found, global variable AzureStorage.File will be set, which is the start point where we can create service objects for file and access to the storage utilities. +

+
+
+ How to get full detailed API definitions? Currently, the JavaScript Client Library shares almost the same API definitions with Node.js SDK, besides Node.js runtime specific APIs. + Please check API details on Azure Storage API reference documents. The JavaScript global variable AzureStorage.File is just like the object require('azure-storage') returns in Node.js, but limits to File related interfaces. + Go to FileService to view possible methods provided by FileService class. +
+
+
+
+ Warning: Azure Storage JavaScript Client Library also supports creating FileService based on Storage Account Key for authentication besides SAS Token. + However, for security concerns, we recommend use of a limited time SAS Token, generated by a backend web server using a Stored Access Policy. +
+
+ +

Step 4: File Share Operations

+

Share: A File storage share is an SMB file share in Azure. All directories and files must be created in a parent share. An account can contain an unlimited number of shares, and a share can store an unlimited number of files, up to the 5 TB total capacity of the file share.

+ +

List File Shares

+

FileService provides listSharesSegmented and listSharesSegmentedWithPrefix for listing file shares under a storage account.

+
+fileService.listSharesSegmented(null, function(error, results) {
+    if(error) {
+        // List shares error
+    } else {
+        for (var i = 0, share; share = results.entries[i]; i++) {
+            // Deal with share object
+        }
+    }
+});
+
+ +

Create File Share

+

FileService provides createShare and createShareIfNotExists for creating a file share under a storage account.

+
+fileService.createShareIfNotExists('myshare', function(error, result) {
+    if(error) {
+        // Create share error
+    } else {
+        // Create share successfully
+    }
+});
+
+ +

Delete File Share

+

FileService provides deleteShare and deleteShareIfExists for deleting a file share under a storage account.

+
+fileService.deleteShareIfExists('myshare', function(error, result) {
+    if(error) {
+        // Delete share error
+    } else {
+        // Delete share successfully
+    }
+});
+
+ +

Executable Example

+

The sample will try to create an Azure Storage file service object based on SAS Token authorization. Enter your Azure Storage account name and SAS Token here. Make sure you have set the CORS rules for the Azure Storage file service, and the SAS Token is in valid period.

+ + +

Azure Storage file service provides plenty of interfaces for file operations. In following example, you can try to list all the file shares under your storage account, and try to create or delete one file share from your account.

+
    +
  • Click button to view the file share list under your Azure Storage account

  • +
  • +

    Click button to create a file share under your Azure Storage account

    +

    +
  • +
  • Click "Delete" button to delete the file share under your Azure Storage account

  • +
  • Click "Select" button to operate with the directories and files in next step

  • +
+
+ +

Step 5: Directory and File Operations

+

Directory in storage is an optional hierarchy of directories.

+

File: A file in the share. A file may be up to 1 TB in size.

+ +

List Files and Directories

+

FileService provides listFilesAndDirectoriesSegmented for listing directories and files under a file share.

+
+fileService.listFilesAndDirectoriesSegmented('myfileshare', '', null, function(error, results, response) {
+    if(error) {
+        // List files and directories error
+    } else {
+        for (var i = 0, dir; dir = results.entries.directories[i]; i++) {
+            // Deal with directory object
+        }
+        for (var i = 0, file; file = results.entries.files[i]; i++) {
+            // Deal with file object
+        }
+    }
+});
+
+ +

Create Directory

+

FileService provides createDirectory and createDirectoryIfNotExists for creating directories under a file share.

+
+fileService.createDirectoryIfNotExists('myfileshare', 'mydirectory', function(error, result, response) {
+    if(error) {
+        // Create directory error
+    } else {
+        // Create directory successfully
+    }
+});
+
+ +

Delete Directory

+

FileService provides deleteDirectory and deleteDirectoryIfExists for deleting directories under a file share.

+
+fileService.deleteDirectoryIfExists('myfileshare', 'mydirectory', function(error, result, response) {
+    if(error) {
+        // Delete directory error
+    } else {
+        // Delete directory successfully
+    }
+});
+
+ +

Upload File

+

FileService provides createFileFromBrowserFile for uploading a file from an HTML file in browsers. +

+ +

Uploading file from stream. You can set up the file name as well as the size of this uploading session.

+
+// If one file has been selected in the HTML file input element
+var files = document.getElementById('fileinput').files;
+var file = files[0];
+
+var finishedOrError = false;
+var speedSummary = fileService.createFileFromBrowserFile('myfileshare', 'mydirectory', file.name, file, {}, function(error, result, response) {
+    finishedOrError = true;
+    if (error) {
+        // Upload file failed
+    } else {
+        // Upload successfully
+    }
+});
+refreshProgress();
+
+

Checking the upload progress with speedSummary object.

+
+speedSummary.on('progress', function () {
+    var process = speedSummary.getCompletePercent();
+    displayProcess(process);
+});
+
+

Download File

+

+ FileService provides interfaces for downloading a file into browser memory directly. + Because of the browser's sandbox limitation, we cannot save the downloaded data chunks to disk until we get all the data chunks of a file into browser memory. + The browser's memory size is also limited especially for downloading huge files, so it's recommended to download a file in the browser with a SAS Token authorized link directly. +

+

+ Shared access signatures (SAS) are a secure way to provide granular access to files and directories without providing your storage account name or keys. Shared access signatures are often used to provide limited access to your data, such as allowing a mobile app to access files. + The following code example generates a new shared access policy that allows the shared access signatures holder to perform read operations on the myfile file, and expires 100 minutes after the time it is created. +

+
+
+ Note: You can choose to use the SAS Token in browser side, or generate a temporary SAS Token dynamically in your server side with Azure Storage C# or Node.js SDKs etc. according to your security requirements. +
+
+
+var downloadLink = fileService.getUrl('myshare', 'mydirectory', 'myfile', 'SAS_TOKEN');
+
+ +

Delete File

+

FileService provides deleteFile and deleteFileIfExists for deleting files under a file share.

+
+fileService.deleteFileIfExists('myfileshare', 'mydirectory', 'myfile', function(error, result, response) {
+    if(error) {
+        // Delete file error
+    } else {
+        // Delete file successfully
+    }
+});
+
+ +

Executable Example

+

After clicking the "Select" button on the file share list, you are able to operate with the directories and files under the selected file share.

+

    +
  • Click button to view the directories and files under your selected file share

  • +
  • Click button to create a directory share under your current directory

    +

    +
  • +
  • Click button to return to upper level directory

  • +
  • Click button to upload a local file to current directory

    +

  • +
  • Click "Delete" button to delete the directory or file

  • +
  • Click "Download" link to download a file to local

  • +
+ + Current Path: +
Uploaded Bytes:
+
+
+ 0% +
+
+
+ +

Step 6: Creating your JavaScript Application based on Azure Storage JavaScript Client Library

+
    +
  1. Setting CORS rules for your selected Azure-Storage account file service.
  2. +
  3. Including functional file(s) needed, such as "azure-storage.file.js" for file operation.
  4. +
  5. Using keyword "AzureStorage.File" to access to Azure storage JavaScript APIs for files.
  6. +
  7. Referring to API documents for detailed API definitions.
  8. +
+

You can view the source code of this sample for detailed reference.

+
+ + + + + + diff --git a/src/node_modules/azure-storage/browser/samples/sample-queue.html b/src/node_modules/azure-storage/browser/samples/sample-queue.html new file mode 100644 index 0000000..99ffe51 --- /dev/null +++ b/src/node_modules/azure-storage/browser/samples/sample-queue.html @@ -0,0 +1,462 @@ + + + + + + Azure Storage JavaScript Client Library Sample for Queue Operations + + + +
+
+

Azure Storage JavaScript Client Library Sample for Queue Operations

+
+

In this sample, we will demonstrate common scenarios for Azure Queue Storage that includes creating, listing and deleting queues and messages.

+
+ +

Azure Storage queue service provides cloud messaging between application components. In designing applications for scale, application components are often decoupled, so that they can scale independently. Queue storage delivers asynchronous messaging for communication between application components, whether they are running in the cloud, on the desktop, on an on-premises server, or on a mobile device. Queue storage also supports managing asynchronous tasks and building process work flows. +

+ +
+
+ Note: You may need to set up an HTTP server to host this sample for IE11 and the latest Chrome. +
+
+ +

Contents:

+ + +

Step 1: Preparing an Azure Storage account with CORS rules set

+

Cross-origin resource sharing, or CORS, must be configured on the Azure Storage account to be accessed directly from JavaScript in the browser. + You are able to set the CORS rules for specific Azure Storage account on the Azure Portal. + The "Allowed origins" could be set to "*" to allow all the origins in this sample. + For more information about CORS, see Cross-Origin Resource Sharing (CORS).

+ + +

Step 2: Importing Azure Storage JavaScript Client Library

+

+ Importing azure-storage.queue.js in your HTML file for queue operations. +

+

+<script src="azure-storage.queue.js"></script>
+
+ +

Step 3: Creating an Azure Storage Queue Service Object

+

+ The QueueService object lets you work with queues and messages. + Following code creates a QueueService object with storage account and SAS Token. +

+
+var queueUri = 'https://' + 'STORAGE_ACCOUNT' + '.queue.core.windows.net';
+var queueService = AzureStorage.Queue.createQueueServiceWithSas(queueUri, 'SAS_TOKEN');
+
+

+ You can load Azure Storage JavaScript Client Library in a CommonJS or AMD environment by JavaScript module loaders. If no module system is found, global variable AzureStorage.Queue will be set, which is the start point where we can create service objects for queue and access to the storage utilities. +

+
+
+ How to get full detailed API definitions? Currently, the JavaScript Client Library shares almost the same API definitions with Node.js SDK, besides Node.js runtime specific APIs. + Please check API details on Azure Storage API reference documents. The JavaScript global variable AzureStorage.Queue is just like the object require('azure-storage') returns in Node.js, but limits to Queue related interfaces. + Go to QueueService to view possible methods provided by QueueService class. +
+
+
+
+ Warning: Azure Storage JavaScript Client Library also supports creating QueueService based on Storage Account Key for authentication besides SAS Token. + However, for security concerns, we recommend use of a limited time SAS Token, generated by a backend web server using a Stored Access Policy. +
+
+ +

Step 4: Queue Operations

+

+ Azure Queue storage is a service for storing large numbers of messages that can be accessed from anywhere in the world via authenticated calls using HTTP or HTTPS. A single queue message can be up to 64 KB in size, and a queue can contain millions of messages, up to the total capacity limit of a storage account. +

+ +

List Queues

+

QueueService provides listQueuesSegmented and listQueuesSegmentedWithPrefix for retrieving the queue list under your storage account.

+
+queueService.listQueuesSegmented(null, function (error, results) {
+    if (error) {
+        // List queue error
+    } else {
+        for (var i = 0, queue; queue = results.entries[i]; i++) {
+            // Deal with queue object
+        }
+    }
+});
+
+ +

Create Queue

+

QueueService provides createQueue and createQueueIfNotExists for creating a queue under a storage account.

+
+queueService.createQueueIfNotExists('myqueue', function(error, result) {
+    if (error) {
+        // Create queue error
+    } else {
+        // Create queue successfully
+    }
+});
+
+ +

Delete Queue

+

QueueService provides deleteQueue and deleteQueueIfExists for deleting a queue under a storage account.

+
+queueService.deleteQueueIfExists('myqueue', function(error, result) {
+    if (error) {
+        // Delete queue error
+    } else {
+        // Delete queue successfully
+    }
+});
+
+ +

Executable Example

+

The sample will try to create an Azure Storage queue service object based on SAS Token authorization. Enter your Azure Storage account name and SAS Token here. Make sure you have set the CORS rules for the Azure Storage queue service, and the SAS Token is in valid period.

+ + +

Azure Storage queue service provides plenty of interfaces for queue operations. In following example, you can try to list all the queues under your storage account, and try to create or delete one queue from your account.

+
    +
  • Click button to view the queue list under your Azure Storage account

  • +
  • Click button to create a queue under your Azure Storage account:

    +

    +
  • +
  • Click "Delete" button to delete the queue under your Azure Storage account

  • +
  • Click "Select" button to select a queue and operate with the queue messages in next step

  • +
+
+ +

Step 5: Message Operations

+

A storage Message, in any format, of up to 64 KB. The maximum time that a message can remain in the queue is 7 days.

+
+
+ Note: Azure Storage JavaScript Client Library provides var encoder = new AzureStorage.Queue.QueueMessageEncoder.TextBase64QueueMessageEncoder() which is a Base64 encoder and decoder. + If a message content string is encoded with encoder.encode(), remember to decode it with encoder.decode() after peeking the message. +
+
+ +

Peek Messages

+

QueueService provides peekMessage and peekMessages for retrieving the messages list under a queue.

+
+queueService.peekMessages('myqueue', {numOfMessages: 32}, function (error, results) {
+    if (error) {
+        // Peek messages error
+    } else {
+        for (var i = 0, message; message = results[i]; i++) {
+            // Deal with message object
+        }
+    }
+});
+
+ +

Create Message

+

QueueService provides createMessage for creating a new message to a queue.

+
+var encoder = new AzureStorage.Queue.QueueMessageEncoder.TextBase64QueueMessageEncoder();
+queueService.createMessage('myqueue', encoder.encode('mymessage'), function (error, results, response) {
+    if (error) {
+        // Create message error
+    } else {
+        // Create message successfully
+    }
+});
+
+ +

Dequeue Message

+

QueueService provides getMessages and deleteMessage for dequeuing next message in a queue.

+
+queueService.getMessages('myqueue', function(error, result, response) {
+  if(!error){
+    // Message text is in messages[0].messageText
+    var message = result[0];
+    queueService.deleteMessage('myqueue', message.messageId, message.popReceipt, function(error, response){
+      if(!error){
+        //message deleted
+      }
+    });
+  }
+});
+
+ +

Update Message

+

QueueService provides getMessages and updateMessage for updating next message in a queue.

+
+var encoder = new AzureStorage.Queue.QueueMessageEncoder.TextBase64QueueMessageEncoder();
+queueService.getMessages('myqueue', function(error, result, response) {
+  if(!error){
+    // Got the message
+    var message = result[0];
+    queueService.updateMessage('myqueue', message.messageId, message.popReceipt, 10, {messageText: encoder.encode('new text')}, function(error, result, response){
+      if(!error){
+        // Message updated successfully
+      }
+    });
+  }
+});
+
+ +

Executable Example

+

After clicking the "Select" button on the queue list in the last step, you are able to operate with the queue messages under the selected queue.

+

    +
  • Click button to refresh the message list in your selected queue

  • +
  • Click button to create a message in your selected queue:

    +

    +
  • +
  • Click button to update the top queue message in your selected queue (Dequeued messages will be invisible for 30s by default.):

    +

    +
  • +
  • Click button to dequeue the top queue message in your selected queue:

  • +
+ +
+ +

Step 6: Creating your JavaScript Application based on Azure Storage JavaScript Client Library

+
    +
  1. Setting CORS rules for your selected Azure-Storage account queue service.
  2. +
  3. Including functional file(s) needed, such as "azure-storage.queue.js" for queue operation.
  4. +
  5. Using keyword "AzureStorage.Queue" to access to Azure storage JavaScript APIs for queues.
  6. +
  7. Referring to API documents for detailed API definitions.
  8. +
+

You can view the source code of this sample for detailed reference.

+
+ + + + + diff --git a/src/node_modules/azure-storage/browser/samples/sample-table.html b/src/node_modules/azure-storage/browser/samples/sample-table.html new file mode 100644 index 0000000..183a04c --- /dev/null +++ b/src/node_modules/azure-storage/browser/samples/sample-table.html @@ -0,0 +1,437 @@ + + + + + + Azure Storage JavaScript Client Library Sample for Table Operations + + + +
+
+

Azure Storage JavaScript Client Library Sample for Table Operations

+
+

In this sample, we will demonstrate common scenarios for Azure Table Storage that includes creating, listing and deleting tables and entities.

+
+ +

Azure Storage table is a service that stores structured NoSQL data in the cloud. Table storage is a key/attribute store with a schemaless design. Because Table storage is schemaless, it's easy to adapt your data as the needs of your application evolve. Access to data is fast and cost-effective for all kinds of applications. Table storage is typically significantly lower in cost than traditional SQL for similar volumes of data. +

+ +
+
+ Note: You may need to set up an HTTP server to host this sample for IE11 and the latest Chrome. +
+
+ +

Contents:

+ + +

Step 1: Preparing an Azure Storage account with CORS rules set

+

Cross-origin resource sharing, or CORS, must be configured on the Azure Storage account to be accessed directly from JavaScript in the browser. + You are able to set the CORS rules for specific Azure Storage account on the Azure Portal. + The "Allowed origins" could be set to "*" to allow all the origins in this sample. + For more information about CORS, see Cross-Origin Resource Sharing (CORS).

+ + +

Step 2: Importing Azure Storage JavaScript Client Library

+

+ Importing azure-storage.table.js in your HTML file for table operations. +

+

+<script src="azure-storage.table.js"></script>
+
+ +

Step 3: Creating an Azure Storage Table Service Object

+

+ The TableService object lets you work with table and entities. + Following code creates a TableService object with storage account and SAS Token. +

+
+var tableUri = 'https://' + 'STORAGE_ACCOUNT' + '.table.core.windows.net';
+var tableService = AzureStorage.Table.createTableServiceWithSas(tableUri, 'SAS_TOKEN');
+
+

+ You can load Azure Storage JavaScript Client Library in a CommonJS or AMD environment by JavaScript module loaders. If no module system is found, global variable AzureStorage.Table will be set, which is the start point where we can create service objects for table and access to the storage utilities. +

+
+
+ How to get full detailed API definitions? Currently, the JavaScript Client Library shares almost the same API definitions with Node.js SDK, besides Node.js runtime specific APIs. + Please check API details on Azure Storage API reference documents. The JavaScript global variable AzureStorage.Table is just like the object require('azure-storage') returns in Node.js, but limits to Table related interfaces. + Go to TableService to view possible methods provided by TableService class. +
+
+
+
+ Warning: Azure Storage JavaScript Client Library also supports creating TableService based on Storage Account Key for authentication besides SAS Token. + However, for security concerns, we recommend use of a limited time SAS Token, generated by a backend web server using a Stored Access Policy. +
+
+ +

Step 4: Table Operations

+

+ Table: A table is a collection of entities. Tables don't enforce a schema on entities, which means a single table can contain entities that have different sets of properties. The number of tables that a storage account can contain is limited only by the storage account capacity limit. +

+ +

List Tables

+

TableService provides listTablesSegmented and listTablesSegmentedWithPrefix for retrieving the table list under your storage account.

+
+tableService.listTablesSegmented(null, {maxResults : 200}, function (error, results) {
+    if (error) {
+        // List tables error
+    } else {
+        for (var i = 0, table; table = results.entries[i]; i++) {
+            // Deal with table object
+        }
+    }
+});
+
+ +

Create Table

+

TableService provides createTable and createTableIfNotExists for creating a table under a storage account.

+
+tableService.createTableIfNotExists('mytable', function(error, result) {
+    if (error) {
+        // Create table error
+    } else {
+        // Create table successfully
+    }
+});
+
+ +

Delete Table

+

TableService provides deleteTable and deleteTableIfExists for deleting a table under a storage account.

+
+tableService.deleteTableIfExists('mytable', function(error, result) {
+    if (error) {
+        // Delete table error
+    } else {
+        // Delete table successfully
+    }
+});
+
+ +

Executable Example

+

The sample will try to create an Azure Storage table service object based on SAS Token authorization. Enter your Azure Storage account name and SAS Token here. Make sure you have set the CORS rules for the Azure Storage table service, and the SAS Token is in valid period.

+ + +

Azure Storage table service provides plenty of interfaces for table operations. In following example, you can try to list all the tables under your storage account, and try to create or delete one table from your account.

+
    +
  • Click button to view the table list under your Azure Storage account

  • +
  • Click button to create a table under your Azure Storage account

    +

    +
  • +
  • Click "Delete" button to delete the table under your Azure Storage account

  • +
  • Click "Select" button to operate with the table entities in next step

  • +
+
+ +

Step 5: Table Entities Operations

+

Entity: An entity is a set of properties, similar to a database row. An entity can be up to 1MB in size.

+

Properties: A property is a name-value pair. Each entity can include up to 252 properties to store data. Each entity also has 3 system properties that specify a partition key, a row key, and a timestamp. Entities with the same partition key can be queried more quickly, and inserted/updated in atomic operations. An entity's row key is its unique identifier within a partition.

+ +

Query Entities

+

TableService provides queryEntities for querying a table under a storage account.

+
+var tableQuery = new AzureStorage.Table.TableQuery().top(200);
+tableService.queryEntities('mytable', tableQuery, null, function(error, result) {
+    if (error) {
+        // Query entities error
+    } else {
+        for (var i = 0, entity; entity = result.entries[i]; i++) {
+            // Deal with entity object
+        }
+    }
+});
+
+ +

Insert or Replace Entity

+

TableService provides insertEntity, insertOrReplaceEntity and insertOrMergeEntity for adding a table entity under a storage account.

+
+var insertEntity = {
+    PartitionKey: {'_': 'partitionKey'},
+    RowKey: {'_': 'rowKey'}
+};
+
+tableService.insertOrReplaceEntity('mytable', insertEntity, function(error, result, response) {
+    if(error) {
+        // Insert table entity error
+    } else {
+        // Insert table entity successfully
+    }
+});
+
+ +

Delete Entity

+

TableService provides deleteEntity for deleting a table entity under a storage account.

+
+var deleteEntity = {
+    PartitionKey: {'_': 'partitionKey'},
+    RowKey: {'_': 'rowKey'}
+};
+
+tableService.deleteEntity('mytable', deleteEntity, function(error, result, response) {
+    if(error) {
+        // Delete table entity error
+    } else {
+        // Delete table entity successfully
+    }
+});
+
+ +

Executable Example

+

After clicking the "Select" button in the table list, you can operate on the table entities in the selected table.

+

    +
  • Click button to refresh the entity list in your selected table

  • +
  • Click button to create an entity in your selected table. If an entity with the same PartitionKey and RowKey already exists, the old entity will be merged.

    +

    + + + + +

    +
  • +
  • Click "Delete" button to delete the selected table entity in your selected table

  • +
+
+ +

Step 6: Creating your JavaScript Application based on Azure Storage JavaScript Client Library

+
    +
  1. Setting CORS rules for your selected Azure-Storage account table service.
  2. +
  3. Including functional file(s) needed, such as "azure-storage.table.js" for table operation.
  4. +
  5. Using keyword "AzureStorage.Table" to access to Azure storage JavaScript APIs for tables.
  6. +
  7. Referring to API documents for detailed API definitions.
  8. +
+

You can view the source code of this sample for detailed reference.

+
+ + + + + + diff --git a/src/node_modules/azure-storage/browser/test/blob/blobservice-upload.js b/src/node_modules/azure-storage/browser/test/blob/blobservice-upload.js new file mode 100644 index 0000000..9f9708e --- /dev/null +++ b/src/node_modules/azure-storage/browser/test/blob/blobservice-upload.js @@ -0,0 +1,411 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + + +var assert = require('assert'); +var TestSuite = require('../../../test/framework/test-suite'); +var testUtil = require('../../../test/framework/util'); + +var suite = new TestSuite('blobservice-upload-browser'); + +if (testUtil.isBrowser()) { + var azure = AzureStorage.Blob; + var containerNamesPrefix = 'upload-test-container-'; + var blobNamesPrefix = 'upload-teset-blob-'; + var containerName; + var blobName; + + describe('BlobServiceUpload', function () { + before(function (done) { + blobService = azure.createBlobService(process.env['AZURE_STORAGE_CONNECTION_STRING']); + assert.notEqual(null, blobService, 'blobService should not be null'); + + containerName = suite.getName(containerNamesPrefix); + blobService.createContainerIfNotExists(containerName, function (err) { + assert.equal(err, null); + done(); + }); + }); + + after(function (done) { + blobService.deleteContainerIfExists(containerName, function (err) { + assert.equal(err, null); + done(); + }); + }); + + beforeEach(function () { + blobName = 
suite.getName(blobNamesPrefix); + }); + + afterEach(function () { + }); + + describe('createBlockBlobFromBrowserFile', function () { + it('upload block blob with invalid type should not work', function () { + try { + blobService.createBlockBlobFromBrowserFile(containerName, blobName, 'abcde', function (err, res, resp) { }); + } catch (e) { + assert.notEqual(e, null); + } + }); + + it('upload block blob with md5 calculation should work', function (done) { + var size = 7 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createBlockBlobFromBrowserFile(containerName, blobName, file, { storeBlobContentMD5: true }, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + assert.notEqual(res.contentSettings.contentMD5, null); + done(); + }); + }); + }); + + it('upload block blob with 0 bytes should work', function (done) { + var size = 0; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createBlockBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.blobType, AzureStorage.Blob.Constants.BlobConstants.BlobTypes.BLOCK); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload block blob with 4 * 1024 * 1024 - 1 bytes should work', function (done) { + var size = 4 * 1024 * 1024 - 1; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createBlockBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + 
assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload block blob with 4 * 1024 * 1024 bytes should work', function (done) { + var size = 4 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createBlockBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload block blob with 4 * 1024 * 1024 + 1 bytes should work', function (done) { + var size = 4 * 1024 * 1024 + 1; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createBlockBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload block blob with 128 * 1024 * 1024 bytes should work', function (done) { + var size = 128 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createBlockBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + }); + + describe('createPageBlobFromBrowserFile', function () { + it('upload page blob with invalid type should not work', function () { + try { + blobService.createPageBlobFromBrowserFile(containerName, blobName, 'abcde', function (err, res, resp) { }); + } catch (e) { + assert.notEqual(e, null); + } + }); + + it('upload page blob with md5 calculation should work', function (done) 
{ + var size = 3 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createPageBlobFromBrowserFile(containerName, blobName, file, { storeBlobContentMD5: true }, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + assert.notEqual(res.contentSettings.contentMD5, null); + done(); + }); + }); + }); + + it('upload page blob with 0 bytes should work', function (done) { + var file = testUtil.getBrowserFile(blobName, 0); + blobService.createPageBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.blobType, AzureStorage.Blob.Constants.BlobConstants.BlobTypes.PAGE); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, 0); + done(); + }); + }); + }); + + it('upload page blob with 4 * 1024 * 1024 - 1 bytes should not work', function () { + var size = 4 * 1024 * 1024 - 1; + var file = testUtil.getBrowserFile(blobName, size); + try { + blobService.createPageBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { }); + } catch (e) { + assert.notEqual(e, null); + } + }); + + it('upload page blob with 4 * 1024 * 1024 bytes should work', function (done) { + var size = 4 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createPageBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload page blob with 128 * 1024 * 1024 bytes should work', function (done) 
{ + var size = 128 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createPageBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + }); + + describe('createAppendBlobFromBrowserFile', function () { + it('upload append blob with invalid type should not work', function () { + try { + blobService.createAppendBlobFromBrowserFile(containerName, blobName, 'abcde', function (err, res, resp) { }); + } catch (e) { + assert.notEqual(e, null); + } + }); + + it('upload append blob with md5 calculation should work', function (done) { + var size = 7 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, { storeBlobContentMD5: true }, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + assert.notEqual(res.contentSettings.contentMD5, null); + done(); + }); + }); + }); + + it('upload append blob with 0 bytes should work', function (done) { + var file = testUtil.getBrowserFile(blobName, 0); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.blobType, AzureStorage.Blob.Constants.BlobConstants.BlobTypes.APPEND); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, 0); + done(); + }); + }); + }); + + it('upload append blob with 4 * 1024 * 1024 - 1 bytes should work', function (done) { + var size = 4 
* 1024 * 1024 - 1; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload append blob with 4 * 1024 * 1024 bytes should work', function (done) { + var size = 4 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload append blob with 4 * 1024 * 1024 + 1 bytes should work', function (done) { + var size = 4 * 1024 * 1024 + 1; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload append blob with 128 * 1024 * 1024 bytes should work', function (done) { + var size = 128 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + }); + + 
describe('appendFromBrowserFile', function () { + beforeEach(function (done) { + var size = 2 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.createAppendBlobFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('append blob with 0 bytes should work', function (done) { + var file = testUtil.getBrowserFile(blobName, 0); + blobService.appendFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.blobType, AzureStorage.Blob.Constants.BlobConstants.BlobTypes.APPEND); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, 2 * 1024 * 1024); + done(); + }); + }); + }); + + it('append blob with 4 * 1024 * 1024 - 1 bytes should work', function (done) { + var size = 4 * 1024 * 1024 - 1; + var file = testUtil.getBrowserFile(blobName, size); + blobService.appendFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size + 2 * 1024 * 1024); + done(); + }); + }); + }); + + it('append blob with 4 * 1024 * 1024 bytes should work', function (done) { + var size = 4 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.appendFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, 
blobName); + assert.equal(res.contentLength, size + 2 * 1024 * 1024); + done(); + }); + }); + }); + + it('append blob with 4 * 1024 * 1024 + 1 bytes should work', function (done) { + var size = 4 * 1024 * 1024 + 1; + var file = testUtil.getBrowserFile(blobName, size); + blobService.appendFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size + 2 * 1024 * 1024); + done(); + }); + }); + }); + + it('append blob with 128 * 1024 * 1024 bytes should work', function (done) { + var size = 128 * 1024 * 1024; + var file = testUtil.getBrowserFile(blobName, size); + blobService.appendFromBrowserFile(containerName, blobName, file, function (err, res, resp) { + assert.equal(err, null); + blobService.getBlobProperties(containerName, blobName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, blobName); + assert.equal(res.contentLength, size + 2 * 1024 * 1024); + done(); + }); + }); + }); + }); + }); +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/test/bundle.js b/src/node_modules/azure-storage/browser/test/bundle.js new file mode 100644 index 0000000..d2c913e --- /dev/null +++ b/src/node_modules/azure-storage/browser/test/bundle.js @@ -0,0 +1,62 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var browserify = require('browserify'); +var fs = require('fs'); +var path = require('path'); + +var bs = browserify([ + path.resolve(__dirname, '../../test/services/queue/queueservice-tests.js'), + path.resolve(__dirname, '../../test/services/table/tablebatch-tests.js'), + path.resolve(__dirname, '../../test/services/table/tabledatatype-tests.js'), + path.resolve(__dirname, '../../test/services/table/tablepayload-tests.js'), + path.resolve(__dirname, '../../test/services/table/tablequery-tests.js'), + path.resolve(__dirname, '../../test/services/table/tableservice-gb-tests.js'), + path.resolve(__dirname, '../../test/services/table/tableservice-tests.js'), + path.resolve(__dirname, '../../test/services/blob/blobservice-archive-tests.js'), + path.resolve(__dirname, '../../test/services/blob/blobservice-container-tests.js'), + path.resolve(__dirname, '../../test/services/blob/blobservice-lease-tests.js'), + path.resolve(__dirname, '../../test/services/blob/blobservice-sse-tests.js'), + path.resolve(__dirname, '../../test/services/blob/blobservice-tests.js'), + path.resolve(__dirname, '../../test/services/blob/blobservice-uploaddownload-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-directory-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-file-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-share-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-sharesnapshot-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-sse-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-tests.js'), + path.resolve(__dirname, '../../test/services/file/fileservice-uploaddownload-tests.js'), + path.resolve(__dirname, '../../test/common/connectionstringparsertests.js'), + path.resolve(__dirname, 
'../../test/common/secondarytests.js'), + path.resolve(__dirname, '../../test/common/servicesettingstests.js'), + path.resolve(__dirname, '../../test/common/servicestatstests.js'), + path.resolve(__dirname, '../../test/common/sharedkey-tests.js'), + path.resolve(__dirname, '../../test/common/storageserviceclienttests.js'), + path.resolve(__dirname, '../../test/common/storageservicesettingstests.js'), + path.resolve(__dirname, '../../test/common/filters/exponentialretrypolicyfilter-tests.js'), + path.resolve(__dirname, '../../test/common/filters/linearretrypolicyfilter-tests.js'), + path.resolve(__dirname, '../../test/common/util/iso8061date-tests.js'), + path.resolve(__dirname, '../../test/common/util/util-tests.js'), + path.resolve(__dirname, '../../test/common/util/validate-tests.js'), + path.resolve(__dirname, '../../test/azure-tests.js'), + path.resolve(__dirname, '../../test/accountsas-tests.js'), + path.resolve(__dirname, './file/fileservice-upload.js'), + path.resolve(__dirname, './blob/blobservice-upload.js') +], { require: ['https'] }).bundle(); + +bs.pipe( + fs.createWriteStream(path.resolve(__dirname, './browser.tests.bundled.js')) +); \ No newline at end of file diff --git a/src/node_modules/azure-storage/browser/test/file/fileservice-upload.js b/src/node_modules/azure-storage/browser/test/file/fileservice-upload.js new file mode 100644 index 0000000..2514829 --- /dev/null +++ b/src/node_modules/azure-storage/browser/test/file/fileservice-upload.js @@ -0,0 +1,155 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + + +var assert = require('assert'); +var TestSuite = require('../../../test/framework/test-suite'); +var testUtil = require('../../../test/framework/util'); + +var suite = new TestSuite('fileservice-upload-browser'); + +if (testUtil.isBrowser()) { + var azure = AzureStorage.File; + var shareNamesPrefix = 'upload-test-share-'; + var fileNamesPrefix = 'upload-test-file-'; + var shareName; + var fileName; + + describe('FileServiceUpload', function () { + before(function (done) { + fileService = azure.createFileService(process.env['AZURE_STORAGE_CONNECTION_STRING']); + assert.notEqual(null, fileService, 'FileService should not be null'); + + shareName = suite.getName(shareNamesPrefix); + fileService.createShareIfNotExists(shareName, function (err) { + assert.equal(err, null); + done(); + }); + }); + + after(function (done) { + fileService.deleteShareIfExists(shareName, function (err) { + assert.equal(err, null); + done(); + }); + }); + + beforeEach(function () { + fileName = suite.getName(fileNamesPrefix); + }); + + afterEach(function (done) { + fileService.deleteFileIfExists(shareName, '', fileName, function (err) { + assert.equal(err, null); + done(); + }); + }); + + describe('createFileFromBrowserFile', function () { + it('upload file with invalid type should not work', function () { + try { + fileService.createFileFromBrowserFile(shareName, '', fileName, 'abcde', function (err, res, resp) {}); + } catch (e) { + assert.notEqual(e, null); + } + }); + + it('upload file with md5 calculation should work', function (done) { + var size = 33 * 1024 * 
1024; + var file = testUtil.getBrowserFile(fileName, size); + fileService.createFileFromBrowserFile(shareName, '', fileName, file, { storeFileContentMD5: true}, function (err, res, resp) { + assert.equal(err, null); + fileService.getFileProperties(shareName, '', fileName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, fileName); + assert.equal(res.contentLength, size); + assert.notEqual(res.contentSettings.contentMD5, null); + done(); + }); + }); + }); + + it('upload file with 0 bytes should work', function (done) { + var file = testUtil.getBrowserFile(fileName, 0); + fileService.createFileFromBrowserFile(shareName, '', fileName, file, function (err, res, resp) { + assert.equal(err, null); + fileService.getFileProperties(shareName, '', fileName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, fileName); + assert.equal(res.contentLength, 0); + done(); + }); + }); + }); + + it('upload file with 4 * 1024 * 1024 - 1 bytes should work', function (done) { + var size = 4 * 1024 * 1024 - 1; + var file = testUtil.getBrowserFile(fileName, size); + fileService.createFileFromBrowserFile(shareName, '', fileName, file, function (err, res, resp) { + assert.equal(err, null); + fileService.getFileProperties(shareName, '', fileName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, fileName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload file with 4 * 1024 * 1024 bytes should work', function (done) { + var size = 4 * 1024 * 1024; + var file = testUtil.getBrowserFile(fileName, size); + fileService.createFileFromBrowserFile(shareName, '', fileName, file, function (err, res, resp) { + assert.equal(err, null); + fileService.getFileProperties(shareName, '', fileName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, fileName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload file with 4 * 1024 * 1024 + 1 bytes should 
work', function (done) { + var size = 4 * 1024 * 1024 + 1; + var file = testUtil.getBrowserFile(fileName, size); + fileService.createFileFromBrowserFile(shareName, '', fileName, file, function (err, res, resp) { + assert.equal(err, null); + fileService.getFileProperties(shareName, '', fileName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, fileName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + + it('upload file with 128 * 1024 * 1024 bytes should work', function (done) { + var size = 128 * 1024 * 1024; + var file = testUtil.getBrowserFile(fileName, size); + fileService.createFileFromBrowserFile(shareName, '', fileName, file, function (err, res, resp) { + assert.equal(err, null); + fileService.getFileProperties(shareName, '', fileName, function (err, res) { + assert.equal(err, null); + assert.equal(res.name, fileName); + assert.equal(res.contentLength, size); + done(); + }); + }); + }); + }); + }); +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/examples/samples/blobuploaddownloadsample.js b/src/node_modules/azure-storage/examples/samples/blobuploaddownloadsample.js new file mode 100644 index 0000000..9a8ef73 --- /dev/null +++ b/src/node_modules/azure-storage/examples/samples/blobuploaddownloadsample.js @@ -0,0 +1,246 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* 1. 
// (Sample description, continued — the file's license header lies above this view.)
//    Demonstrates how to upload all files from a given directory in parallel.
// 2. Demonstrates how to download all files from a given blob container to a given destination directory.
// 3. Demonstrate making requests using AccessConditions.

var fs = require('fs');

var azure;
if (fs.existsSync('absolute path to azure-storage.js')) {
  azure = require('absolute path to azure-storage');
} else {
  azure = require('azure-storage');
}

var container = 'updownsample3';
var blob = 'updownsample';
var blobAccess = 'updownaccesssample';

// Blob client with exponential back-off retries.
var blobService = azure.createBlobService()
  .withFilter(new azure.ExponentialRetryPolicyFilter());

// optionally set a proxy
/*var proxy = {
  protocol: 'http:',
  host: '127.0.0.1',
  port: 8888
};

blobService.setProxy(proxy);
*/

// Entry point: parses `srcPath destPath` from argv, then runs the
// upload -> download -> access-condition -> cleanup pipeline.
function uploadSample() {
  var processArguments = process.argv;
  if (processArguments.length !== 4) {
    console.log('Incorrect number of arguments. Should be: srcPath destPath');
    process.exit(1);
  }

  var srcPath = processArguments[2];
  var destPath = processArguments[3];

  console.log('Starting blobuploaddownloadsample.');

  // Create the container
  createContainer(container, function () {

    // Demonstrates how to upload all files from a given directory
    uploadBlobs(srcPath, container, function () {

      // Demonstrates how to download all files from a given
      // blob container to a given destination directory.
      downloadBlobs(container, destPath, function () {

        // Demonstrate making requests using AccessConditions.
        useAccessCondition(container, function () {

          // Delete the container
          deleteContainer(container, function () {
            console.log('Ending blobuploaddownloadsample.');
          });
        });
      });
    });
  });
}

function createContainer (container, callback) {
  // Create the container (no-op if it already exists).
  blobService.createContainerIfNotExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Created the container ' + container);
      callback();
    }
  });
}

// Uploads every file found (recursively) under sourceDirectoryPath as a block
// blob named after the file's basename. Invokes callback once every upload
// attempt has settled.
function uploadBlobs(sourceDirectoryPath, containerName, callback) {
  console.log('Entering uploadBlobs.');

  // validate directory is valid.
  if (!fs.existsSync(sourceDirectoryPath)) {
    console.log(sourceDirectoryPath + ' is an invalid directory path.');
  } else {
    // Search the directory and generate a list of files to upload.
    walk(sourceDirectoryPath, function (error, files) {
      if (error) {
        console.log(error);
      } else {
        var finished = 0;

        // generate and schedule an upload for each file
        files.forEach(function (file) {
          var blobName = file.replace(/^.*[\\\/]/, '');

          blobService.createBlockBlobFromLocalFile(containerName, blobName, file, function (error) {
            finished++;

            if (error) {
              console.log(error);
            } else {
              console.log(' Blob ' + blobName + ' upload finished.');
            }

            // BUG FIX: the completion check used to live inside the success
            // branch only, so a single failed upload made the sample hang
            // forever. Continue once every upload attempt has settled.
            if (finished === files.length) {
              console.log('All files uploaded');
              callback();
            }
          });
        });
      }
    });
  }
}

// Downloads every blob in containerName into destinationDirectoryPath,
// creating the directory if needed.
function downloadBlobs(containerName, destinationDirectoryPath, callback) {
  console.log('Entering downloadBlobs.');

  // Validate directory
  if (!fs.existsSync(destinationDirectoryPath)) {
    console.log(destinationDirectoryPath + ' does not exist. Attempting to create this directory...');
    fs.mkdirSync(destinationDirectoryPath);
    console.log(destinationDirectoryPath + ' created.');
  }

  // NOTE: does not handle pagination.
  blobService.listBlobsSegmented(containerName, null, function (error, result) {
    if (error) {
      console.log(error);
    } else {
      var blobs = result.entries;
      var blobsDownloaded = 0;

      blobs.forEach(function (blob) {
        blobService.getBlobToLocalFile(containerName, blob.name, destinationDirectoryPath + '/' + blob.name, function (error2) {
          blobsDownloaded++;

          if (error2) {
            console.log(error2);
          } else {
            console.log(' Blob ' + blob.name + ' download finished.');
          }

          // BUG FIX: as in uploadBlobs, the completion check used to run only
          // on success, stalling the sample if any download failed.
          if (blobsDownloaded === blobs.length) {
            console.log('All files downloaded');
            callback();
          }
        });
      });
    }
  });
}

// Demonstrates conditional requests: re-creating a blob with an IfNoneMatch
// ETag condition, which is expected to fail with 412 ConditionNotMet.
function useAccessCondition(containerName, callback) {
  console.log('Entering useAccessCondition.');

  // Create a blob.
  blobService.createBlockBlobFromText(containerName, blobAccess, 'hello', function (error, blobInformation) {
    if (error) {
      console.log(error);
    } else {
      console.log(' Created the blob ' + blobInformation.name);
      console.log(' Blob Etag is: ' + blobInformation.etag);

      // Use the If-not-match ETag condition to access the blob. By
      // using the IfNoneMatch condition we are asserting that the blob needs
      // to have been modified in order to complete the request. In this
      // sample no other client is accessing the blob, so this will fail as
      // expected.
      var options = { accessConditions: { EtagNonMatch: blobInformation.etag} };
      blobService.createBlockBlobFromText(containerName, blobInformation.name, 'new hello', options, function (error2) {
        if (error2 && error2.statusCode === 412 && error2.code === 'ConditionNotMet') {
          console.log('Attempted to recreate the blob with the if-none-match access condition and got the expected exception.');
          callback();
        } else {
          console.log(' Blob was incorrectly updated');
          if (error2) {
            console.log(error2);
          }
        }
      });
    }
  });
}

function deleteContainer (container, callback) {
  // Delete the container.
  blobService.deleteContainerIfExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Deleted the container ' + container);
      callback();
    }
  });
}

// Utility function

// Recursively collects all file paths under `dir`; done(err, results).
var walk = function (dir, done) {
  var results = [];
  fs.readdir(dir, function (err, list) {
    if (err) return done(err);
    var i = 0;
    (function next() {
      var file = list[i++];
      if (!file) return done(null, results);
      file = dir + '/' + file;
      fs.stat(file, function (err2, stat) {
        if (stat && stat.isDirectory()) {
          walk(file, function (err3, res) {
            // BUG FIX: recursion errors were silently discarded.
            if (err3) return done(err3);
            results = results.concat(res);
            next();
          });
        } else {
          results.push(file);
          next();
        }
      });
    })();
  });
};

uploadSample();

// ==== [patch] new file: src/node_modules/azure-storage/examples/samples/continuationsample.js ====
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This sample demonstrates how to handle continuation tokens and virtual
// "pages" of results when performing a listing operation on the blob service.
//
// This sample performs the following steps:
//   0. Create container.
//   1. Create 50 blobs.
//   2. List the first 10 (page size) blobs.
//   3. (continued)
// (sample description, continued)
//   3. Check whether there are more results.
//   4. Repeat 2 and 3 until complete.

var fs = require('fs');

var azure;
if (fs.existsSync('absolute path to azure-storage.js')) {
  azure = require('absolute path to azure-storage');
} else {
  azure = require('azure-storage');
}

var container = 'paginationsample';
var blob = 'contsample';
var blobs = [];

var blobService = azure.createBlobService()
  .withFilter(new azure.ExponentialRetryPolicyFilter());

// optionally set a proxy
/*var proxy = {
  protocol: 'http:',
  host: '127.0.0.1',
  port: 8888
};

blobService.setProxy(proxy);
*/

var totalBlobsCount;
var pageSize;

// Entry point: parses `numBlobs pageSize` from argv, then creates the
// container, uploads the blobs, pages through the listing and cleans up.
function continuationSample () {
  var processArguments = process.argv;
  if (processArguments.length !== 4) {
    console.log('Incorrect number of arguments. Should be: numBlobs pageSize [deleteContainer]\nTry: 51 10');
    process.exit(1);
  }

  totalBlobsCount = parseInt(processArguments[2], 10);
  pageSize = parseInt(processArguments[3], 10);

  console.log('Starting continuationSample.');

  // Create the container
  createContainer(container, function () {

    console.log('Entering createBlobs.');

    // Upload blobs from text.
    createBlobs(totalBlobsCount, function () {
      var options = {
        maxResults: pageSize,
        include: 'metadata',
        locationMode: azure.StorageUtilities.LocationMode.PRIMARY_THEN_SECONDARY
      };

      console.log('Entering listBlobs.');

      // List blobs using continuation tokens.
      listBlobs(options, null, function () {

        // Delete the container
        deleteContainer(container, function () {
          console.log('Ending continuationSample.');
        });
      });
    });
  });
}

function createContainer (container, callback) {
  // Create the container.
  blobService.createContainerIfNotExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Created the container ' + container);
      callback();
    }
  });
}

// Recursively uploads `currentBlobsCount` text blobs, counting down to 1.
function createBlobs(currentBlobsCount, callback) {
  // Upload totalBlobsCount blobs to the container.
  var options = {};
  options.metadata = {'hello':'world'};

  blobService.createBlockBlobFromText(container, blob + currentBlobsCount, 'blob' + currentBlobsCount, options, function (error) {
    if (error) {
      console.log(error);
    } else if (currentBlobsCount > 1) {
      createBlobs(--currentBlobsCount, callback);
    } else {
      console.log(' Created ' + totalBlobsCount + ' blobs.');
      callback();
    }
  });
}

// Pages through the container's blobs with continuation tokens, accumulating
// entries into the module-level `blobs` array.
function listBlobs (options, token, callback) {
  blobService.listBlobsSegmented(container, token, options, function(error, result) {
    // BUG FIX: the error was previously ignored, so any failed request
    // crashed on `result.entries`. Report it and stop paging instead.
    if (error) {
      console.log(error);
      return;
    }
    blobs.push.apply(blobs, result.entries);
    // BUG FIX: `var token` re-declared (and shadowed) the parameter;
    // use a distinct name for the next page's token.
    var nextToken = result.continuationToken;
    if(nextToken) {
      console.log(' Received a page of results. There are ' + result.entries.length + ' blobs on this page.');
      listBlobs(options, nextToken, callback);
    }
    else {
      console.log(' Completed listing. There are ' + blobs.length + ' blobs');
      callback();
    }
  });
}

function deleteContainer (container, callback) {
  // Delete the container.
  blobService.deleteContainerIfExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Deleted the container ' + container);
      callback();
    }
  });
}

continuationSample();

// ==== [patch] new file: src/node_modules/azure-storage/examples/samples/requestresponseeventssample.js ====
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// (Apache 2.0 license header, continued)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/**
* Demonstrates how to define sendingrequest and receivedresponse event handlers.
*/
var fs = require('fs');

var azure;
if (fs.existsSync('absolute path to azure-storage.js')) {
  azure = require('absolute path to azure-storage');
} else {
  azure = require('azure-storage');
}

var container = 'sendingrequestevent3';

// The service object which will define the event handlers
var blobService = azure.createBlobService();

// optionally set a proxy
/*var proxy = {
  protocol: 'http:',
  host: '127.0.0.1',
  port: 8888
};

blobService.setProxy(proxy);
blobService2.setProxy(proxy);
*/

// Fired just before each request goes out; stamps a custom header onto it.
var sendingRequestHandler = function (webResource) {
  webResource.withHeader('x-ms-custom-header', 'value');
  console.log(' sending request event handler called');
};

// Fired once each response arrives.
var responseReceivedHandler = function (response) {
  console.log(' received response event handler called');
};

// Registers both handlers, then exercises them by creating and deleting a
// container.
function eventHandlersSample () {
  console.log('Starting eventHandlersSample.');

  // set the event handlers
  blobService.on('sendingRequestEvent', sendingRequestHandler);
  blobService.on('receivedResponseEvent', responseReceivedHandler);

  // create and delete a container with these handlers
  createContainer(container, function () {
    // Delete the container
    deleteContainer(container, function () {
      console.log('Ending eventHandlersSample.');
    });
  });
}

// Creates the container; invokes callback only on success.
function createContainer (container, callback) {
  blobService.createContainer(container, function (error) {
    if (error) {
      console.log(error);
      return;
    }
    console.log('Created the container ' + container);
    callback();
  });
}

// Deletes the container; invokes callback only on success.
function deleteContainer (container, callback) {
  blobService.deleteContainer(container, function (error) {
    if (error) {
      console.log(error);
      return;
    }
    console.log('Deleted the container ' + container);
    callback();
  });
}

eventHandlersSample();

// ==== [patch] new file: src/node_modules/azure-storage/examples/samples/retrypolicysample.js ====
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Demonstrates how to use pre-written retry policies and how to define a
// customized retry policy.
//
// In the sample for pre-written retry policies, we simply show how to use
// pre-written retry policies.
//
// In the sample for customized retry policy, we define a customized retry
// policy, which retries on the "The specified container is being deleted"
// exception besides the server exceptions.
// (sample description, continued)
// Note that only in the cloud (not the storage emulator), "The specified
// container is being deleted" exceptions will be sent if users immediately
// recreate a container after deleting it.

var fs = require('fs');
if (!fs.existsSync) {
  fs.existsSync = require('path').existsSync;
}

var azure;
if (fs.existsSync('absolute path to azure-storage.js')) {
  azure = require('absolute path to azure-storage');
} else {
  azure = require('azure-storage');
}

var RetryPolicyFilter = azure.RetryPolicyFilter;
var LocationMode = azure.StorageUtilities.LocationMode;

var container = 'customretrypolicysample';

var blobService;

/**
 * Demonstrate how to use pre-written retry policies.
 * By default, no retry will be performed with service instances newly created.
 * Several pre-written retry policies are available with modifiable settings,
 * and can be used through associating filter.
 * The service instances created here are purely illustrative and are not
 * used afterwards.
 */
function setRetries() {
  console.log('Starting Sample 1 - setRetries.');

  // By default, no retry will be performed with all kinds of services created
  // by Azure storage client library for Node.js.
  var blobServiceWithoutRetry = azure.createBlobService();
  console.log('BlobService instance created, no retry will be performed by default.');

  // There are two pre-written retry policies: ExponentialRetryPolicyFilter
  // and LinearRetryPolicyFilter can be used with modifiable settings.
  // Use an exponential retry with customized settings.
  var fileServiceWithExponentialRetry = azure.createFileService().withFilter(
    new azure.ExponentialRetryPolicyFilter(
      3, // retryCount is set to 3 times.
      4000, // retryInterval is set to 4 seconds.
      3000, // minRetryInterval is set to 3 seconds.
      120000 // maxRetryInterval is set to 120 seconds.
    ));
  console.log('FileService instance created and associated with ExponentialRetryPolicyFilter.');
  console.log(' Retries will be performed with exponential back-off.');

  // Use a default linear retry policy.
  var tableServiceWithLinearRetry = azure.createTableService().withFilter(
    new azure.LinearRetryPolicyFilter()); // By default, retryCount is set to 3 times and retryInterval is set to 30 seconds.
  console.log('TableService instance created and associated with LinearRetryPolicyFilter,');
  console.log(' Retries will be performed with linear back-off.');

  console.log('Ending Sample 1 - setRetries.');
}

/**
 * Demonstrate how to use custom retry policy.
 * Any custom retry logic may be used by simply creating and setting
 * RetryPolicyFilter instance.
 */
function setCustomRetryPolicy() {
  console.log('Starting Sample 2 - setCustomRetryPolicy.');

  // Step 1 : Set the retry policy to customized retry policy which will
  // not retry on any failing status code other than the excepted one.
  var retryOnContainerBeingDeleted = new RetryPolicyFilter();
  retryOnContainerBeingDeleted.retryCount = 5;
  retryOnContainerBeingDeleted.retryInterval = 5000;

  retryOnContainerBeingDeleted.shouldRetry = function (statusCode, retryData) {
    console.log(' Made the request at ' + new Date().toUTCString() + ', received StatusCode: ' + statusCode);

    var retryInfo = {};

    // FIX (comment only): the previous comment claimed this retries on any
    // bad status code other than 409. The code actually does the opposite:
    // among failing codes it retries ONLY on 409 ("container is being
    // deleted") and 500, and refuses to retry everything else. The retry
    // interval grows by 2s per prior attempt.
    if (statusCode >= 300 && statusCode !== 409 && statusCode !== 500) {
      retryInfo.retryable = false;
    } else {
      var currentCount = (retryData && retryData.retryCount) ? retryData.retryCount : 0;

      retryInfo = {
        retryInterval: this.retryInterval + 2000 * currentCount,
        retryable: currentCount < this.retryCount
      };
    }

    return retryInfo;
  };

  blobService = azure.createBlobService().withFilter(retryOnContainerBeingDeleted);

  // optionally set a proxy
  /*var proxy = {
    protocol: 'http:',
    host: '127.0.0.1',
    port: 8888
  };

  blobService.setProxy(proxy);*/


  // Step 2: Create the container
  createContainer(function () {

    // Step 3: Fetch attributes from the container using LocationMode.SECONDARY_THEN_PRIMARY
    fetchAttributesContainer(function () {

      // Step 4: Lease the container
      leaseContainer(function () {

        // Step 5: Lease the container again, retrying until it succeeds
        leaseContainer(function () {

          // Step 6: Delete the container
          deleteContainer(function () {
            console.log('Ending Sample 2 - setCustomRetryPolicy.');
          });
        });
      });
    });
  });
}

function createContainer(callback) {
  console.log('Entering createContainer.');

  // Create the container.
  blobService.createContainerIfNotExists(container, function (error, containerResult) {
    if (error) {
      console.log(error);
    } else {
      console.log(' Container info ');
      console.log(containerResult);
      console.log('Created the container ' + container);
      callback();
    }
  });
}

function fetchAttributesContainer(callback) {
  console.log('Entering fetchAttributesContainer.');

  var options = {
    locationMode: LocationMode.SECONDARY_THEN_PRIMARY
  };

  // Get the properties of the container.
  blobService.getContainerProperties(container, options, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Downloaded container properties from ' + container);
      callback();
    }
  });
}

function leaseContainer(callback) {
  console.log('Entering leaseContainer.');

  // Try to acquire the lease.
  blobService.acquireLease(container, null, {leaseDuration: 15}, function (error, lease) {
    if (error) {
      console.log(error);
    }
    else {
      console.log('Acquired lease from ' + container + ' with leaseid' + lease.id);
      callback();
    }
  });
}

function deleteContainer(callback) {
  console.log('Entering deleteContainer.');

  // Break the lease.
  blobService.breakLease(container, null, {leaseBreakPeriod: 0}, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log(' Broke the lease on the container ' + container);
    }

    // Delete the container.
    // FIX: the inner callback used to shadow the outer `error` variable.
    blobService.deleteContainer(container, function (deleteError) {
      if (deleteError) {
        console.log(deleteError);
      } else {
        console.log('Deleted the container ' + container);
        callback();
      }
    });
  });
}

function runAllSamples() {
  console.log("Starting retrypolicySample.");
  setRetries();
  // NOTE(review): setCustomRetryPolicy is asynchronous, so the "Ending"
  // message below prints before Sample 2 actually finishes — confirm intent.
  setCustomRetryPolicy();
  console.log("Ending retrypolicySample.");
}

runAllSamples();

// ==== [patch] new file: src/node_modules/azure-storage/examples/samples/sassample.js ====
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

/**
* In this sample, we demonstrate how to generate and use the blob level shared access signature and the container level
* shared access signature.
*
* In the blob level shared access signature sample, there are the following steps:
*
* 1. Create a container and a blob.
*
* 2. Generate a shared access signature for the blob and download the blob using it.
*
* 3. Upload a "ReadWrite" policy and a "Read" permission to the container.
*
* 4. Generate a shared access signature for the blob using the policy id and download the blob using it.
*/

var fs = require('fs');
// NOTE(review): `assert` is unused in the visible code; kept to avoid
// changing the module's load behavior.
var assert = require('assert');

var azure;
if (fs.existsSync('../../lib/azure-storage.js')) {
  azure = require('../../lib/azure-storage');
} else {
  azure = require('azure-storage');
}
var BlobUtilities = azure.BlobUtilities;

var container = 'container-sassample';
var blob = 'blob-sassample';

var blobService = azure.createBlobService();

// optionally set a proxy
/*var proxy = {
  protocol: 'http:',
  host: '127.0.0.1',
  port: 8888
};

blobService.setProxy(proxy);
blobService2.setProxy(proxy);
*/

// Entry point: runs the blob-level SAS flow, then the policy-based flow,
// then cleans up.
function sasSample () {
  console.log('Starting sasSample.');

  // Create the container.
  createContainer(function () {

    // Create a blob.
    createBlob( function () {

      // Create a shared access signature and use it to download the blob just created.
      downloadBlobUsingSharedAccessSignature(function () {

        // Add Shared Access policies to the container
        createPolicies(function () {

          // Use the read policy just created
          usePermissions(function () {

            // Delete the container
            deleteContainer(function () {
              console.log('Ending sasSample.');
            });
          });
        });
      });
    });
  });
}

function createContainer (callback) {
  // Create the container.
  blobService.createContainerIfNotExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Created the container ' + container);
      callback();
    }
  });
}

function createBlob (callback) {
  // Create the blob
  blobService.createBlockBlobFromText(container, blob, 'test blob', function (error) {
    if (error) {
      console.log(error);
    }
    else {
      // BUG FIX: this message used to print the container name
      // ('Created the blob ' + container) instead of the blob name.
      console.log('Created the blob ' + blob);
      callback();
    }
  });
}

// Generates an ad-hoc READ SAS valid for 5 minutes and uses it to fetch the
// blob's properties.
function downloadBlobUsingSharedAccessSignature (callback) {
  var startDate = new Date();
  var expiryDate = new Date(startDate);
  expiryDate.setMinutes(startDate.getMinutes() + 5);

  var sharedAccessPolicy = {
    AccessPolicy: {
      Permissions: BlobUtilities.SharedAccessPermissions.READ,
      Start: startDate,
      Expiry: expiryDate
    }
  };

  var sharedAccessSignatureToken = blobService.generateSharedAccessSignature(container, blob, sharedAccessPolicy);

  var sharedBlobService = azure.createBlobServiceWithSas(blobService.host, sharedAccessSignatureToken);

  // Download the blob by using the shared access signature URL.
  sharedBlobService.getBlobProperties(container, blob, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Downloaded the blob ' + blob + ' by using the shared access signature URL: \n ' + sharedBlobService.getUrl(container, blob, sharedAccessSignatureToken));
    }

    callback();
  });
}

function createPolicies (callback) {
  // Create a "ReadWrite" policy and a "Read" policy.
  var readWriteStartDate = new Date();
  var readWriteExpiryDate = new Date(readWriteStartDate);
  readWriteExpiryDate.setMinutes(readWriteStartDate.getMinutes() + 10);

  var signedIdentifiers = {
    readwrite: {
      Start: readWriteStartDate,
      Expiry: readWriteExpiryDate,
      Permissions: 'rw'
    },
    read: {
      // NOTE(review): this policy expires at its own creation time, making it
      // unusable on arrival — possibly intended to be readWriteExpiryDate;
      // confirm before relying on the "read" policy.
      Expiry: readWriteStartDate,
      Permissions: 'r'
    }
  };

  // Wait 30 seconds for the container acl to be processed
  var func = function () {
    var options = { publicAccessLevel: BlobUtilities.BlobContainerPublicAccessType.CONTAINER };
    blobService.setContainerAcl(container, signedIdentifiers, options, function(error) {
      if (error) {
        console.log(error);
      } else {
        console.log('Uploaded the permissions for the container ' + container);
        callback();
      }
    });
  };

  setTimeout(func, 30000);
}

// Uses a SAS derived from the stored "readwrite" policy (plus response header
// overrides) to read the blob.
function usePermissions (callback) {
  // Read, write the blob using the shared access signature from "ReadWrite" policy.
  var readWriteAccessPolicy = {
    Id: 'readwrite'
  };

  var headers = {
    cacheControl: 'no-transform',
    contentDisposition: 'attachment',
    contentEncoding: 'gzip',
    contentLanguage: 'tr,en',
    contentType: 'text/html'
  };

  // BUG FIX: readWriteAccessPolicy was declared but an identical object
  // literal was passed instead; pass the declared variable.
  var sharedAccessSignatureToken = blobService.generateSharedAccessSignature(container, null, readWriteAccessPolicy, headers);

  var sharedBlobService = azure.createBlobServiceWithSas(blobService.host, sharedAccessSignatureToken);

  sharedBlobService.getBlobProperties(container, blob, function (error, result) {
    if (error) {
      console.log(error);
    } else {
      console.log('Downloaded the blob ' + blob + ' by using the shared access signature URL: \n ' + sharedBlobService.getUrl(container, blob, sharedAccessSignatureToken));

    }

    callback();
  });
}

function deleteContainer (callback) {
  // Delete the container.
  blobService.deleteContainerIfExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Deleted the container ' + container);
      callback();
    }
  });
}

sasSample();

// ==== [patch] new file: src/node_modules/azure-storage/examples/samples/snapshotsample.js ====
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This sample is used to provide an overview of blob snapshots and how to
// work with them.
//   1. Upload blocks and commit them.
//   2. Take a snapshot for that blob.
//   3. Re-upload one of the three blocks and commit them.
//   4. Get the snapshot.
//   5. List blobs including snapshots.
//   6. Delete the snapshot.
// (end of the sample description above)

var fs = require('fs');

var azure;
if (fs.existsSync('absolute path to azure-storage.js')) {
  azure = require('absolute path to azure-storage');
} else {
  azure = require('azure-storage');
}

var BlobUtilities = azure.BlobUtilities;

var container = 'snapshotsample';
var blob = 'snapshotsample';

var blockList = ['b1', 'b2', 'b3'];
var blockContent = ['content1', 'content2', 'content3'];
var blockContentAlternative2 = 'alternative2';

var blobService = azure.createBlobService();

// optionally set a proxy
/*var proxy = {
  protocol: 'http:',
  host: '127.0.0.1',
  port: 8888
};

blobService.setProxy(proxy);*/

// Entry point: upload -> snapshot -> modify -> read snapshot -> list ->
// delete snapshots -> delete container.
function snapshotSample () {
  var processArguments = process.argv;
  if (processArguments.length !== 2) {
    console.log('Incorrect number of arguments. No arguments should be given.');
    process.exit(1);
  }

  console.log('Starting snapshotSample.');

  // Create the container
  createContainer(function () {

    // Upload a blob
    uploadBlockBlob(function () {

      // Create a snapshot of the blob
      createSnapshot(function (snapshot) {

        // Update the blob
        blockContent[1] = blockContentAlternative2;
        uploadBlockBlob(function () {

          // FIX (comment only): this step reads the earlier snapshot's
          // content back; it does not create a new snapshot.
          getSnapshotToText(snapshot, function () {

            // List the blob and its snapshots
            listSnapshots(function () {

              // Delete the snapshots
              deleteSnapshots(function () {

                // Delete the container
                deleteContainer(function () {
                  console.log('Ending snapshotSample.');
                });
              });
            });
          });
        });
      });
    });
  });
}

function createContainer (callback) {
  // Create the container.
  blobService.createContainerIfNotExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Created the container ' + container);
      callback();
    }
  });
}

function uploadBlockBlob(callback) {
  // Upload 3 blocks and commit them.
  var blocks = 0;
  // Counts successful block uploads and commits once all blocks are in.
  // FIX: dropped the unused `block` parameter and added the statement
  // terminator on the function expression.
  var blobCallbackCounter = function () {
    ++blocks;
    if (blocks === blockList.length) {
      console.log(' Created ' + blocks + ' blocks.');

      blobService.commitBlocks(container, blob, {LatestBlocks: blockList}, function (error4) {
        if (error4) {
          console.log(error4);
        }
        else {
          console.log('Committed the blob ' + blob);
          callback();
        }
      });
    }
  };

  for(var i = 0; i < blockList.length; i++) {
    console.log(' Uploading a block. ID: ' + blockList[i] + ' Content: ' + blockContent[i]);
    // NOTE(review): the fifth argument passes the text length where the SDK
    // expects an options object — confirm against createBlockFromText's
    // documented signature.
    blobService.createBlockFromText(blockList[i], container, blob, blockContent[i], blockContent[i].length, function (error) {
      if (error) {
        console.log(error);
      } else {
        blobCallbackCounter();
      }
    });
  }
}

function createSnapshot(callback) {
  // Creates a snapshot.
  blobService.createBlobSnapshot(container, blob, function (error, snapshot) {
    if (error) {
      console.log(error);
    } else {
      console.log('Created a snapshot for the blob ' + blob);
      callback(snapshot);
    }
  });
}

function getSnapshotToText(snapshot, callback) {
  // Gets a snapshot.
  blobService.getBlobToText(container, blob, {snapshotId: snapshot}, function (error, text) {
    if (error) {
      console.log(error);
    } else {
      console.log('Snapshot ' + blob + '?' + snapshot + ' text: ' + text);
      callback();
    }
  });
}

function listSnapshots (callback) {
  // List the blobs, including snapshots
  blobService.listBlobsSegmented(container, null, { include: BlobUtilities.BlobListingDetails.SNAPSHOTS }, function (error, results) {
    if (error) {
      console.log(error);
    } else {
      console.log('Listing the blobs under the container ' + container);

      // FIX: the iteration variable used to shadow the module-level `blob`.
      results.entries.forEach(function (entry) {
        var snapshot = '';
        if (entry.snapshot) {
          snapshot = '; BlobSnapshot: ' + entry.snapshot;
        }
        console.log(' BlobName: ' + entry.name + snapshot);
      });

      callback();
    }
  });
}

function deleteSnapshots (callback) {
  // Delete the snapshot.
  blobService.deleteBlob(container, blob, { deleteSnapshots: BlobUtilities.SnapshotDeleteOptions.SNAPSHOTS_ONLY }, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Deleted the snapshots.');
      callback();
    }
  });
}

function deleteContainer (callback) {
  // Delete the container.
  blobService.deleteContainerIfExists(container, function (error) {
    if (error) {
      console.log(error);
    } else {
      console.log('Deleted the container ' + container);
      callback();
    }
  });
}

snapshotSample();

// ==== [patch] new file: src/node_modules/azure-storage/examples/samples/tablequerysample.js ====

var fs = require('fs');
var assert = require('assert');
// NOTE(review): `util` and `http` are unused in the visible code; kept to
// avoid changing the module's load behavior.
var util = require('util');
var http = require('http');

var azure;
if (fs.existsSync('absolute path to azure-storage.js')) {
  azure = require('absolute path to azure-storage');
} else {
  azure = require('azure-storage');
}

var TableQuery = azure.TableQuery;
var TableUtilities = azure.TableUtilities;
var eg = TableUtilities.entityGenerator;

var tableName = 'tablequerysample';
var tableService = azure.createTableService();

// optionally set a proxy
/*var proxy = {
  protocol: 'http:',
  host: '127.0.0.1',
  port: 8888
};

tableService.setProxy(proxy);*/

var entity1 = {
  PartitionKey: eg.String('partition1'),
  RowKey: eg.String('row1'),
  integerfield: eg.Int32(1),
  stringfield: eg.String('stringfield value'),
  longfield: eg.Int64('92233720368547758')
};

var entity2 = {
  PartitionKey: eg.String('partition1'),
  RowKey: eg.String('row2'),
  stringfield: eg.String('stringfield value'),
  longfield: eg.Int64('8547758')
};

// Entry point: creates the table, inserts two entities, then demonstrates
// full / filtered / top-N / projected queries before deleting the table.
function performTableQuery() {
  // Create the table
  tableService.createTable(tableName, function (error1) {
    assert.equal(error1, null);

    // Insert the entities
    insertEntities(function() {

      // Return all entities
      queryAllEntities(function () {

        // Return entities Where certain conditions are met
        queryEntitiesWhere(function () {

          // Return the Top n entities
          queryEntitiesTop(function () {

            // Return Select fields from entities
            queryEntitiesSelect(function () {

              // Delete the table
              tableService.deleteTable(tableName, function (error2) {
                assert.equal(error2, null);
              });
            });
          });
        });
      });
    });
  });
}

function insertEntities(callback) {
  // insert the entities
  tableService.insertEntity(tableName, entity1, function (error1) {
    assert.equal(error1, null);
    tableService.insertEntity(tableName, entity2, function (error2) {
      assert.equal(error2, null);
      callback();
    });
  });
}

function queryAllEntities(callback) {
  // Select all fields
  tableService.queryEntities(tableName, null, null, function (error, result) {
    assert.equal(error, null);
    assert.notEqual(result, null);
    assert.notEqual(result.entries, null);

    var entities = result.entries;
    assert.equal(entities.length, 2);
    var entityResult = entities[0];
    assert.equal(entityResult.stringfield._, entity1.stringfield._);
    assert.equal(entityResult.longfield._, entity1.longfield._);

    callback();
  });
}

function queryEntitiesWhere(callback) {
  // Select only the entries where the longfield is great than 10,000,000

  // equivalently: var tableQuery = new TableQuery().where('longfield == ?int64?', '10000000');
  var tableQuery = new TableQuery().where(TableQuery.int64Filter('longfield', TableUtilities.QueryComparisons.GREATER_THAN, '10000000'));

  tableService.queryEntities(tableName, tableQuery, null, function (error, result) {
    assert.equal(error, null);
    assert.notEqual(result, null);
    assert.notEqual(result.entries, null);

    var entities = result.entries;
    assert.equal(entities.length, 1);

    var entityResult = entities[0];
    assert.equal(entityResult.longfield._, entity1.longfield._);

    callback();
  });
}

function queryEntitiesTop(callback) {
  // Select only the top entry
  var tableQuery = new TableQuery().top(1);

  tableService.queryEntities(tableName, tableQuery, null, function (error, result) {
    assert.equal(error, null);
    assert.notEqual(result, null);
    assert.notEqual(result.entries, null);

    var entities = result.entries;
    assert.equal(entities.length, 1);

    var entityResult = entities[0];
    assert.equal(entityResult.integerfield._, entity1.integerfield._);
    assert.equal(entityResult.longfield._, entity1.longfield._);

    callback();
  });
}

function queryEntitiesSelect(callback) {
  // Select specific field
  var tableQuery = new TableQuery().select('integerfield');

  tableService.queryEntities(tableName, tableQuery, null, function (error, result) {
    assert.equal(error, null);
    assert.notEqual(result, null);
    assert.notEqual(result.entries, null);

    var entities = result.entries;
    assert.equal(entities.length, 2);

    var entityResult = entities[0];
    assert.equal(entityResult.integerfield._, entity1.integerfield._);
    assert.equal(entityResult.longfield, undefined);

    callback();
  });
}

performTableQuery();

// ==== [patch] new file: src/node_modules/azure-storage/gruntfile.js ====
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +module.exports = function(grunt) { + //init stuff + grunt.initConfig({ + + //jsdoc config + jsdoc: { + dist: { + src: [ + 'README.md', + 'lib/azure-storage.js', + 'lib/common/filters/retrypolicyfilter.js', + 'lib/common/filters/linearretrypolicyfilter.js', + 'lib/common/filters/exponentialretrypolicyfilter.js', + 'lib/common/services/storageutilities.js', + 'lib/services/blob/blobservice.core.js', + 'lib/services/blob/blobservice.node.js', + 'lib/services/blob/blobservice.browser.js', + 'lib/services/blob/models/blobresult.js', + 'lib/services/blob/models/containerresult.js', + 'lib/services/blob/models/leaseresult.js', + 'lib/services/blob/blobutilities.js', + 'lib/services/queue/queueservice.js', + 'lib/services/queue/queuemessageencoder.js', + 'lib/services/queue/queueutilities.js', + 'lib/services/queue/models/queueresult.js', + 'lib/services/queue/models/queuemessageresult.js', + 'lib/services/table/tableservice.js', + 'lib/services/table/tablebatch.js', + 'lib/services/table/tablequery.js', + 'lib/services/table/tableutilities.js', + 'lib/services/file/fileservice.core.js', + 'lib/services/file/fileservice.node.js', + 'lib/services/file/fileservice.browser.js', + 'lib/services/file/fileutilities.js', + 'lib/services/file/models/shareresult.js', + 'lib/services/file/models/directoryresult.js', + 'lib/services/file/models/fileresult.js', + 'lib/common/services/storageserviceclient.js', + 'lib/common/diagnostics/logger.js' + ], + options: { + destination: 'docs', + template: 'node_modules/ink-docstrap/template', + configure: 
'jsdoc/jsdoc.json' + } + } + }, + + }); + + grunt.loadNpmTasks('grunt-jsdoc'); + + grunt.registerTask('doc', ['jsdoc']); + grunt.registerTask('default', ['doc',]); +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/jsdoc/jsdoc.json b/src/node_modules/azure-storage/jsdoc/jsdoc.json new file mode 100644 index 0000000..2737ecc --- /dev/null +++ b/src/node_modules/azure-storage/jsdoc/jsdoc.json @@ -0,0 +1,27 @@ +{ + "tags" : { + "allowUnknownTags" : true + }, + "plugins" : ["plugins/markdown"], + + "templates" : { + "cleverLinks" : false, + "monospaceLinks" : false, + "dateFormat" : "ddd MMM Do YYYY", + "outputSourceFiles" : true, + "outputSourcePath" : true, + "systemName" : "Microsoft Azure Storage Client Library for Node.js and JavaScript", + "footer" : "", + "navType" : "vertical", + "theme" : "cosmo", + "linenums" : true, + "collapseSymbols" : false, + "inverseNav" : true, + "highlightTutorialCode" : false, + "protocol": "fred://" + }, + "markdown" : { + "parser" : "gfm", + "hardwrap" : true + } +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/karma.conf.js b/src/node_modules/azure-storage/karma.conf.js new file mode 100644 index 0000000..342cc61 --- /dev/null +++ b/src/node_modules/azure-storage/karma.conf.js @@ -0,0 +1,90 @@ +module.exports = function (config) { + config.set({ + + // base path that will be used to resolve all patterns (eg. 
files, exclude) + basePath: './', + + // frameworks to use + // available frameworks: https://npmjs.org/browse/keyword/karma-adapter + frameworks: ['mocha'], + + plugins: [ + 'karma-mocha', + 'karma-mocha-reporter', + 'karma-chrome-launcher', + 'karma-edge-launcher', + 'karma-firefox-launcher', + 'karma-ie-launcher', + 'karma-env-preprocessor' + ], + + // list of files / patterns to load in the browser + files: [ + 'browser/bundle/azure-storage.blob.js', + 'browser/bundle/azure-storage.table.js', + 'browser/bundle/azure-storage.queue.js', + 'browser/bundle/azure-storage.file.js', + 'browser/test/browser.tests.bundled.js' + ], + + // list of files / patterns to exclude + exclude: [], + + // preprocess matching files before serving them to the browser + // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor + preprocessors: { + '**/*.js': ['env'], + }, + + // inject following environment values into browser testing with window.__env__ + // environment values MUST be exported or set with same console running "karma start" + // https://www.npmjs.com/package/karma-env-preprocessor + envPreprocessor: [ + 'AZURE_STORAGE_CONNECTION_STRING', + 'AZURE_STORAGE_CONNECTION_STRING_SSE_ENABLED_ACCOUNT', + 'AZURE_STORAGE_CONNECTION_STRING_BLOB_ACCOUNT', + 'AZURE_STORAGE_CONNECTION_STRING_PREMIUM_ACCOUNT' + ], + + // test results reporter to use + // possible values: 'dots', 'progress' + // available reporters: https://npmjs.org/browse/keyword/karma-reporter + reporters: ['mocha'], + + // web server port + port: 9876, + + // enable / disable colors in the output (reporters and logs) + colors: true, + + // level of logging + // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG + logLevel: config.LOG_INFO, + + // enable / disable watching file and executing tests whenever any file changes + autoWatch: false, + + // start these browsers + // available browser launchers: 
https://npmjs.org/browse/keyword/karma-launcher + // 'Chrome', 'Firefox', 'Edge', 'IE' + browsers: ['Chrome'], + + // Continuous Integration mode + // if true, Karma captures browsers, runs the tests and exits + singleRun: false, + + // Concurrency level + // how many browser should be started simultaneous + concurrency: 1, + + browserNoActivityTimeout: 600000, + + client: { + mocha: { + // change Karma's debug.html to the mocha web reporter + reporter: 'html', + timeout: '600000' + } + } + }) +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/azure-storage.js b/src/node_modules/azure-storage/lib/azure-storage.js new file mode 100644 index 0000000..9b74ade --- /dev/null +++ b/src/node_modules/azure-storage/lib/azure-storage.js @@ -0,0 +1,433 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var exports = module.exports; + +/** +* Creates a connection string that can be used to create a service which runs on the storage emulator. The emulator must be downloaded separately. +* +* @param {string} [proxyUri] The proxyUri. By default, http://127.0.0.1 +* @return {string} A connection string representing the development storage credentials. 
+* @example +* var azure = require('azure-storage'); +* var devStoreCreds = azure.generateDevelopmentStorageCredentials(); +* var blobService = azure.createBlobService(devStoreCreds); +*/ +exports.generateDevelopmentStorageCredentials = function (proxyUri) { + var devStore = 'UseDevelopmentStorage=true;'; + if(proxyUri){ + devStore += 'DevelopmentStorageProxyUri=' + proxyUri; + } + + return devStore; +}; + +/** + * Table client exports. + * @ignore + */ +var TableService = require('./services/table/tableservice'); + +exports.TableService = TableService; +exports.TableQuery = require('./services/table/tablequery'); +exports.TableBatch = require('./services/table/tablebatch'); +exports.TableUtilities = require('./services/table/tableutilities'); + +/** +* Creates a new {@link TableService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {TableService} A new TableService object. +* +*/ +exports.createTableService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new TableService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +/** +* Creates a new {@link TableService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. 
+* @return {TableService} A new TableService object with the SAS credentials. +*/ +exports.createTableServiceWithSas = function (hostUri, sasToken) { + return new TableService(null, null, hostUri, sasToken); +}; + +/** + * Blob client exports. + * @ignore + */ +var BlobService = require('./services/blob/blobservice.node'); + +exports.BlobService = BlobService; +exports.BlobUtilities = require('./services/blob/blobutilities'); + +/** +* Creates a new {@link BlobService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} storageAccountOrConnectionString The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {BlobService} A new BlobService object. +*/ +exports.createBlobService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new BlobService(storageAccountOrConnectionString, storageAccessKey, host, null); +}; + +/** +* Creates a new {@link BlobService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. +* @return {BlobService} A new BlobService object with the SAS credentials. +*/ +exports.createBlobServiceWithSas = function (host, sasToken) { + return new BlobService(null, null, host, sasToken); +}; + +/** +* Creates a new {@link BlobService} object using the host Uri and the {@link TokenCredential} provided, which supports OAuth. 
+* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {TokenCredential} tokenCredential The token credential object. +* @return {BlobService} A new BlobService object with the {@link TokenCredential} object. +* +* @example +* var azure = require('azure-storage'); +* var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); +* var blobService = azure.createBlobServiceWithTokenCredential('https://account.blob.core.windows.net', tokenCredential); +* tokenCredential.set('updatedOAuthAccessToken'); +*/ +exports.createBlobServiceWithTokenCredential = function (host, tokenCredential) { + return new BlobService(null, null, host, null, null, tokenCredential); +}; + +/** +* Creates a new {@link BlobService} object using the host uri and anonymous access. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {BlobService} A new BlobService object with the anonymous credentials. +*/ +exports.createBlobServiceAnonymous = function (host) { + return new BlobService(null, null, host, null); +}; + +/** + * File client exports. + * @ignore + */ +var FileService = require('./services/file/fileservice.node'); + +exports.FileService = FileService; +exports.FileUtilities = require('./services/file/fileutilities'); + +/** +* Creates a new {@link FileService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} storageAccountOrConnectionString The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. 
+* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {FileService} A new FileService object. +*/ +exports.createFileService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new FileService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +/** +* Creates a new {@link FileService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. +* @return {FileService} A new FileService object with the SAS credentials. +*/ +exports.createFileServiceWithSas = function (hostUri, sasToken) { + return new FileService(null, null, hostUri, sasToken); +}; + +/** + * Queue client exports. + * @ignore + */ +var QueueService = require('./services/queue/queueservice'); + +exports.QueueService = QueueService; +exports.QueueUtilities = require('./services/queue/queueutilities'); +exports.QueueMessageEncoder = require('./services/queue/queuemessageencoder'); + +/** +* Creates a new {@link QueueService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {QueueService} A new QueueService object. 
+*/ +exports.createQueueService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new QueueService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +/** +* Creates a new {@link QueueService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. +* @return {QueueService} A new QueueService object with the SAS credentials. +*/ +exports.createQueueServiceWithSas = function(hostUri, sasToken) { + return new QueueService(null, null, hostUri, sasToken); +}; + +/** +* Creates a new {@link QueueService} object using the host Uri and the {@link TokenCredential} provided, which supports OAuth. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {TokenCredential} tokenCredential The TokenCredential object. +* @return {QueueService} A new QueueService object with the {@link TokenCredential} object. 
+* +* @example +* var azure = require('azure-storage'); +* var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); +* var queueService = azure.createQueueServiceWithTokenCredential('https://account.queue.core.windows.net', tokenCredential); +* tokenCredential.set('updatedOAuthAccessToken'); +*/ +exports.createQueueServiceWithTokenCredential = function (host, tokenCredential) { + return new QueueService(null, null, host, null, null, tokenCredential); +}; + +/** +* Account SAS +* @ignore +*/ + +var azureCommon = require('./common/common.node'); +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKey = azureCommon.SharedKey; +/** +* Generates an account shared access signature token +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {SharedAccessServices} sharedAccessPolicy.AccessPolicy.Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.Services`. +* @param {SharedAccessResourceTypes} sharedAccessPolicy.AccessPolicy.ResourceTypes The resource type for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.ResourceTypes`. +* @param {SharedAccessPermissions} sharedAccessPolicy.AccessPolicy.Permissions The permissions for a shared access signature. +* Refer to `Constants.AccountSasConstants.Permissions`. +* @param {date} sharedAccessPolicy.AccessPolicy.Start The time at which the Shared Access Signature becomes valid. +* @param {date} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired. +* @param {string} sharedAccessPolicy.AccessPolicy.IPAddressOrRange The permission type. Refer to `Constants.AccountSasConstants.ResourceTypes`. 
+* @param {string} sharedAccessPolicy.AccessPolicy.Protocols The possible protocols. Refer to `Constants.AccountSasConstants.ResourceTypes`. +*/ +exports.generateAccountSharedAccessSignature = function(storageAccountOrConnectionString, storageAccessKey, sharedAccessAccountPolicy) +{ + var storageSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey); + var sharedKey = new SharedKey(storageSettings._name, storageSettings._key); + + return sharedKey.generateAccountSignedQueryString(sharedAccessAccountPolicy); +}; + + +/** +* A callback that returns a response object. +* @callback errorOrResponse +* @param {object} error If an error occurs, will contain information about the error. +* @param {object} response Contains information about the response returned for the operation. +* For example, HTTP status codes and headers. +*/ + +/** +* A callback that returns result and response objects. +* @callback errorOrResult +* @param {object} error If an error occurs, will contain information about the error. +* @param {object} result The result of the operation. +* @param {object} response Contains information about the response returned for the operation. +* For example, HTTP status codes and headers. +*/ + + +/** +* Specifying conditional headers for blob service operations. See http://msdn.microsoft.com/en-us/library/dd179371.aspx for more information. +* @typedef {object} AccessConditions +* @property {string} EtagMatch If the ETag for the blob matches the specified ETag. +* Specify the wildcard character (*) to perform the operation only if the resource does exist, and fail the operation if it does not exist. +* @property {string} EtagNonMatch If the ETag for the blob does not match the specified ETag. +* Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. 
+* @property {Date|string} DateModifedSince If the blob has been modified since the specified date. +* @property {Date|string} DateUnModifiedSince If the blob has not been modified since the specified date. +* @property {Number|string} SequenceNumberLessThanOrEqual If the blob's sequence number is less than or equal to the specified value. +* For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. +* @property {Number|string} SequenceNumberLessThan If the blob's sequence number is less than the specified value. +* For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. +* @property {Number|string} SequenceNumberEqual If the blob's sequence number is equal to the specified value. +* For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. +* @property {Number|string} MaxBlobSize If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the specified value. +* For Append Block operation only. See https://msdn.microsoft.com/en-us/library/mt427365.aspx for more information. +* @property {Number|string} MaxAppendPosition If the append position is equal to the specified value. +* For Append Block operation only. See https://msdn.microsoft.com/en-us/library/mt427365.aspx for more information. +*/ + +/** +* The properties of a storage service, including properties of Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. +* @typedef {object} ServiceProperties +* @property {string} DefaultServiceVersion The default version of Storage Analytics currently in use. +* @property {LoggingProperties} Logging The Logging settings. +* @property {MetricsProperties} HourMetrics The HourMetrics settings provide a summary of request statistics grouped by API in hourly aggregates. 
+* @property {MetricsProperties} MinuteMetrics The HourMetrics settings provide request statistics grouped by API for each minute. +* @property {object} Cors Groups all CORS rules. +* @property {CorsRule[]} Cors.CorsRules Groups settings for a `[CORS rule]{@link CorsRule}`. +*/ + +/** +* The properties of a storage account. +* @typedef {object} AccountProperties +* @property {string} SkuName The header that specifies storage SKU, also known as account type. +* @property {string} AccountKind The header that describes the flavour of the storage account, also known as account kind. +*/ + +/** +* The properties of a blob storage service, including properties of Storage Analytics, CORS (Cross-Origin Resource Sharing) rules and Static Webiste configurations. +* @typedef {object} BlobServiceProperties +* @property {string} DefaultServiceVersion The default version of Storage Analytics currently in use. +* @property {LoggingProperties} Logging The Logging settings. +* @property {MetricsProperties} HourMetrics The HourMetrics settings provide a summary of request statistics grouped by API in hourly aggregates. +* @property {MetricsProperties} MinuteMetrics The HourMetrics settings provide request statistics grouped by API for each minute. +* @property {StaticWebsiteProperties} StaticWebsite The Azure Static Website settings. +* @property {object} Cors Groups all CORS rules. +* @property {CorsRule[]} Cors.CorsRules Groups settings for a `[CORS rule]{@link CorsRule}`. +*/ + +/** +* The Azure Static Website settings. +* @typedef {object} StaticWebsiteProperties +* @property {boolean} Enabled Whether feature of Static Website is enabled. +* @property {string} IndexDocument Indicates index document page path. +* @property {string} ErrorDocument404Path Indicates 404 document page path. +*/ + +/** +* The Azure Analytics logging settings. +* @typedef {object} LoggingProperties +* @property {string} Version The version of Storage Analytics currently in use for logging. 
+* @property {boolean} Delete Indicates whether delete requests are being logged. +* @property {boolean} Read Indicates whether read requests are being logged. +* @property {boolean} Write Indicates whether write requests are being logged. +* @property {RetentionPolicy} RetentionPolicy The retention policy of the log data. +*/ + +/** +* The setting of Azure Analytics summary of request stastics. +* @typedef {object} MetricsProperties +* @property {string} Version The version of Storage Analytics currently in use for hour metrics. +* @property {string} Enabled Indicates whether metrics are enabled +* @property {boolean} IncludeAPIs Indicates whether metrics generate summary statistics for called API operations. +* @property {RetentionPolicy} RetentionPolicy The retention policy of the metrics data. +*/ + +/** +* The CORS rule of a storage service. +* @typedef {object} CorsRule +* @property {string[]} AllowedMethods A list of HTTP methods that are allowed to be executed by the origin. For Azure Storage, permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. +* @property {string[]} AllowedOrigins A list of origin domains that are allowed via CORS, or "*" if all domains are allowed. +* @property {string[]} AllowedHeaders A list of headers allowed to be part of the cross-origin request. +* @property {string[]} ExposedHeaders A list of response headers to expose to CORS clients. +* @property {number} MaxAgeInSeconds The number of seconds that the client/browser should cache a preflight response. +*/ + +/** +* The Azure Analytics logging or metrics retention policy. +* @typedef {object} RetentionPolicy +* @property {boolean} Enabled Indicates whether a retention policy is enabled for the storage service. +* @property {number} Days Indicates the number of days that logging data is retained. All data older than this value will be deleted. +*/ + +/** +* The access policy. +* @typedef {object} AccessPolicy +* @property {string} Permissions The permission type. 
+* @property {Date} Start The time at which the access policy becomes valid. +* @property {Date} Expiry The time at which the access policy becomes expired. +* @property {string} IPAddressOrRange An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @property {string} Protocols The protocols permitted for a request made with the SAS. +* @property {string} Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* @property {string} ResourceTypes The resource type for a shared access signature associated with this shared access policy. +*/ + +/** +* The service statistics. +* @typedef {object} ServiceStats +* @property {object} GeoReplication The geo replication stastics. +* @property {string} GeoReplication.Status The status of the secondary location. +* @property {Date} GeoReplication.LastSyncTime A GMT date/time value, to the second. +* All primary writes preceding this value are guaranteed to be available for read operations at the secondary. +* Primary writes after this point in time may or may not be available for reads. +*/ + +/** +* The range. +* @typedef {object} Range +* @property {number} start The start of the range. +* @property {number} end The end of the range. +*/ + +/** +* The range diff. Refer to https://msdn.microsoft.com/en-us/library/azure/mt736912.aspx +* @typedef {object} RangeDiff +* @property {number} start The start of the range. +* @property {number} end The end of the range. +* @property {boolean} isCleared If the range is cleared or not. 
+ +*/ + +exports.Constants = azureCommon.Constants; +exports.StorageUtilities = azureCommon.StorageUtilities; +exports.AccessCondition = azureCommon.AccessCondition; + +exports.SR = azureCommon.SR; +exports.StorageServiceClient = StorageServiceClient; +exports.Logger = azureCommon.Logger; +exports.WebResource = azureCommon.WebResource; +exports.Validate = azureCommon.validate; +exports.date = azureCommon.date; +exports.TokenCredential = azureCommon.TokenCredential; + +// Other filters +exports.LinearRetryPolicyFilter = azureCommon.LinearRetryPolicyFilter; +exports.ExponentialRetryPolicyFilter = azureCommon.ExponentialRetryPolicyFilter; +exports.RetryPolicyFilter = azureCommon.RetryPolicyFilter; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/common.browser.js b/src/node_modules/azure-storage/lib/common/common.browser.js new file mode 100644 index 0000000..ebea0f5 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/common.browser.js @@ -0,0 +1,21 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var azureCommon = require('./common.core'); + +azureCommon.BrowserFileReadStream = require('./streams/browserfilereadstream'); + +module.exports = azureCommon; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/common.core.js b/src/node_modules/azure-storage/lib/common/common.core.js new file mode 100644 index 0000000..1865cd0 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/common.core.js @@ -0,0 +1,68 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var exports = module.exports; + +var azureutil = require('./util/util'); + +require('./util/patch-xmlbuilder'); + +var nodeVersion = azureutil.getNodeVersion(); +if (nodeVersion.major === 0 && nodeVersion.minor > 8 && !(nodeVersion.minor > 10 || (nodeVersion.minor === 10 && nodeVersion.patch >= 3))) { + throw new Error('The Microsoft Azure node SDK does not work with node versions > 0.9.0 and < 0.10.3. 
Please upgrade to node >= 0.10.3'); +} + +exports.xmlbuilder = require('xmlbuilder'); +exports.xml2js = require('xml2js'); + +exports.Logger = require('./diagnostics/logger'); +exports.WebResource = require('./http/webresource'); + +// Services +exports.StorageServiceClient = require('./services/storageserviceclient'); + +// Models +exports.ServicePropertiesResult = require('./models/servicepropertiesresult'); +exports.ServiceStatsParser = require('./models/servicestatsparser'); +exports.AclResult = require('./models/aclresult'); +exports.TokenCredential = require('./models/tokencredential'); + +// Filters +exports.LinearRetryPolicyFilter = require('./filters/linearretrypolicyfilter'); +exports.ExponentialRetryPolicyFilter = require('./filters/exponentialretrypolicyfilter'); +exports.RetryPolicyFilter = require('./filters/retrypolicyfilter'); + +// Signing +exports.SharedAccessSignature = require('./signing/sharedaccesssignature'); +exports.SharedKey = require('./signing/sharedkey'); + +// Streams +exports.BatchOperation = require('./streams/batchoperation'); +exports.ChunkAllocator = require('./streams/chunkallocator'); +exports.ChunkStream = require('./streams/chunkstream'); +exports.ChunkStreamWithStream = require('./streams/chunkstreamwithstream'); +exports.SpeedSummary = require('./streams/speedsummary'); +exports.BufferStream = require('./streams/bufferstream'); + +// Utilities +exports.Constants = require('./util/constants'); +exports.SR = require('./util/sr'); +exports.date = require('./util/date'); +exports.ISO8061Date = require('./util/iso8061date'); +exports.util = require('./util/util'); +exports.validate = require('./util/validate'); +exports.StorageUtilities = require('./util/storageutilities'); +exports.AccessCondition = require('./util/accesscondition'); \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/common.node.js b/src/node_modules/azure-storage/lib/common/common.node.js new file mode 100644 index 
0000000..d2a38d4 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/common.node.js @@ -0,0 +1,22 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var azureCommon = require('./common.core'); + +// Streams +azureCommon.FileReadStream = require('./streams/filereadstream'); + +module.exports = azureCommon; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/diagnostics/logger.js b/src/node_modules/azure-storage/lib/common/diagnostics/logger.js new file mode 100644 index 0000000..7ba28c6 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/diagnostics/logger.js @@ -0,0 +1,148 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* Creates a new Logger object +* @class +* The Logger class is used to write log information. 
+* +* @constructor +* +* @param {string} [level] The log level. Refer to Logger.LogLevels. +* @param {object} [loggerFunction] The function to write log information. +*/ +function Logger(level, loggerFunction) { + /** + * The log level. Refer to the Logger.LogLevels for available log levels. + * @name Logger#level + * @type {string} + * @see Logger.LogLevels + */ + this.level = level; + + this.loggerFunction = loggerFunction; + + if (!this.loggerFunction) { + this.loggerFunction = this.defaultLoggerFunction; + } +} + +/** +* The available log levels. +* +* @const +* @enum {string} +*/ +Logger.LogLevels = { + /** + * System is unusable. + */ + EMERGENCY: 'emergency', + + /** + * Action must be taken immediately. + */ + ALERT : 'alert', + + /** + * Critical condition. + */ + CRITICAL : 'critical', + + /** + * Error condition. + */ + ERROR : 'error', + + /** + * Warning condition. + */ + WARNING : 'warning', + + /** + * Normal but significant condition. + */ + NOTICE : 'notice', + + /** + * Purely informational message. + */ + INFO : 'info', + + /** + * Application debug messages. 
+ */ + DEBUG : 'debug' +}; + +Logger.logPriority = [ + Logger.LogLevels.EMERGENCY, + Logger.LogLevels.ALERT, + Logger.LogLevels.CRITICAL, + Logger.LogLevels.ERROR, + Logger.LogLevels.WARNING, + Logger.LogLevels.NOTICE, + Logger.LogLevels.INFO, + Logger.LogLevels.DEBUG +]; + +Logger.prototype.log = function (level, msg) { + this.loggerFunction(level, msg); +}; + +Logger.prototype.emergency = function(msg) { + this.log(Logger.LogLevels.EMERGENCY, msg); +}; + +Logger.prototype.critical = function(msg) { + this.log(Logger.LogLevels.CRITICAL, msg); +}; + +Logger.prototype.alert = function(msg) { + this.log(Logger.LogLevels.ALERT, msg); +}; + +Logger.prototype.error = function(msg) { + this.log(Logger.LogLevels.ERROR, msg); +}; + +Logger.prototype.warn = function(msg) { + this.log(Logger.LogLevels.WARNING, msg); +}; + +Logger.prototype.notice = function(msg) { + this.log(Logger.LogLevels.NOTICE, msg); +}; + +Logger.prototype.info = function(msg) { + this.log(Logger.LogLevels.INFO, msg); +}; + +Logger.prototype.debug = function(msg) { + this.log(Logger.LogLevels.DEBUG, msg); +}; + +Logger.prototype.defaultLoggerFunction = function(logLevel , msg) { + var currentLevelIndex = Logger.logPriority.indexOf(this.level); + var logLevelIndex = Logger.logPriority.indexOf(logLevel); + var time = new Date(); + var timeStamp = time.toISOString(); + if (logLevelIndex <= currentLevelIndex) { + console.log('[' + timeStamp + ']' + this.level + ' : ' + msg); + } +}; + +module.exports = Logger; diff --git a/src/node_modules/azure-storage/lib/common/errors/errors.js b/src/node_modules/azure-storage/lib/common/errors/errors.js new file mode 100644 index 0000000..1e8e852 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/errors/errors.js @@ -0,0 +1,68 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var util = require('util'); +var _ = require('underscore'); + +function captureStackTrace(targetObject, constructorOpt) { + if (Error.captureStackTrace) { + Error.captureStackTrace(targetObject, constructorOpt); + } +} + +function ArgumentError(argumentName, message) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.argumentName = argumentName; + this.message = message || util.format('Invalid or missing argument supplied: %s', argumentName); +} +util.inherits(ArgumentError, Error); + +function ArgumentNullError(argumentName, message) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.argumentName = argumentName; + this.message = message || util.format('Missing argument: %s', argumentName); +} + +util.inherits(ArgumentNullError, Error); + +function StorageError(message, properties) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.message = message; + + if(properties){ + _.extend(this, properties); + } +} + +util.inherits(StorageError, Error); + +function TimeoutError(message) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.message = message; +} + +util.inherits(TimeoutError, Error); + +module.exports.ArgumentError = ArgumentError; +module.exports.ArgumentNullError = ArgumentNullError; +module.exports.StorageError = StorageError; +module.exports.TimeoutError = TimeoutError; +module.exports.captureStackTrace = captureStackTrace; \ No newline at end of file 
diff --git a/src/node_modules/azure-storage/lib/common/filters/exponentialretrypolicyfilter.js b/src/node_modules/azure-storage/lib/common/filters/exponentialretrypolicyfilter.js new file mode 100644 index 0000000..ce269d0 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/filters/exponentialretrypolicyfilter.js @@ -0,0 +1,95 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +'use strict'; + +var RetryPolicyFilter = require('./retrypolicyfilter'); +/** +* Creates a new 'ExponentialRetryPolicyFilter' instance. +* @class +* The ExponentialRetryPolicyFilter allows you to retry operations, +* using an exponential back-off interval between retries. +* To apply a filter to service operations, use `withFilter` +* and specify the filter to be used when creating a service. +* @constructor +* @param {number} [retryCount=3] The client retry count. +* @param {number} [retryInterval=30000] The client retry interval, in milliseconds. +* @param {number} [minRetryInterval=3000] The minimum retry interval, in milliseconds. +* @param {number} [maxRetryInterval=90000] The maximum retry interval, in milliseconds. 
+* +* @example +* var azure = require('azure-storage'); +* var retryOperations = new azure.ExponentialRetryPolicyFilter(); +* var blobService = azure.createBlobService().withFilter(retryOperations) +*/ +function ExponentialRetryPolicyFilter(retryCount, retryInterval, minRetryInterval, maxRetryInterval) { + this.retryCount = retryCount ? retryCount : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT; + this.retryInterval = retryInterval ? retryInterval : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL; + this.minRetryInterval = minRetryInterval ? minRetryInterval : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MIN_RETRY_INTERVAL; + this.maxRetryInterval = maxRetryInterval ? maxRetryInterval : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MAX_RETRY_INTERVAL; +} + +/** +* Represents the default client retry interval, in milliseconds. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL = 1000 * 30; + +/** +* Represents the default client retry count. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT = 3; + +/** +* Represents the default maximum retry interval, in milliseconds. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MAX_RETRY_INTERVAL = 1000 * 90; + +/** +* Represents the default minimum retry interval, in milliseconds. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MIN_RETRY_INTERVAL = 1000 * 3; + +/** + * Determines if the operation should be retried and how long to wait until the next retry. + * + * @param {number} statusCode The HTTP status code. + * @param {object} requestOptions The request options. + * @return {retryInfo} Information about whether the operation qualifies for a retry and the retryInterval. + */ +ExponentialRetryPolicyFilter.prototype.shouldRetry = function (statusCode, requestOptions) { + var retryData = (requestOptions && requestOptions.retryContext) ? 
requestOptions.retryContext : {}; + + // Adjust retry interval + var incrementDelta = Math.pow(2, retryData.retryCount) - 1; + var boundedRandDelta = this.retryInterval * 0.8 + Math.floor(Math.random() * (this.retryInterval * 1.2 - this.retryInterval * 0.8)); + incrementDelta *= boundedRandDelta; + + retryData.retryInterval = Math.min(this.minRetryInterval + incrementDelta, this.maxRetryInterval); + + return RetryPolicyFilter._shouldRetryOnError(statusCode, requestOptions); +}; + +/** +* Handles an operation with an exponential retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. +*/ +ExponentialRetryPolicyFilter.prototype.handle = function (requestOptions, next) { + RetryPolicyFilter._handle(this, requestOptions, next); +}; + +module.exports = ExponentialRetryPolicyFilter; diff --git a/src/node_modules/azure-storage/lib/common/filters/linearretrypolicyfilter.js b/src/node_modules/azure-storage/lib/common/filters/linearretrypolicyfilter.js new file mode 100644 index 0000000..9342dfa --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/filters/linearretrypolicyfilter.js @@ -0,0 +1,75 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +'use strict'; + +var RetryPolicyFilter = require('./retrypolicyfilter'); + +/** +* Creates a new LinearRetryPolicyFilter instance. 
+* @class +* The LinearRetryPolicyFilter allows you to retry operations, +* using an linear back-off interval between retries. +* To apply a filter to service operations, use `withFilter` +* and specify the filter to be used when creating a service. +* @constructor +* @param {number} [retryCount=3] The client retry count. +* @param {number} [retryInterval=30000] The client retry interval, in milliseconds. +* +* @example +* var azure = require('azure-storage'); +* var retryOperations = new azure.LinearRetryPolicyFilter(); +* var blobService = azure.createBlobService().withFilter(retryOperations) +*/ +function LinearRetryPolicyFilter(retryCount, retryInterval) { + this.retryCount = retryCount ? retryCount : LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT; + this.retryInterval = retryInterval ? retryInterval : LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL; +} + +/** +* Represents the default client retry interval, in milliseconds. +*/ +LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL = 1000 * 30; + +/** +* Represents the default client retry count. +*/ +LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT = 3; + +/** +* Determines if the operation should be retried and how long to wait until the next retry. +* + * @param {number} statusCode The HTTP status code. + * @param {object} requestOptions The request options. + * @return {retryInfo} Information about whether the operation qualifies for a retry and the retryInterval. +*/ +LinearRetryPolicyFilter.prototype.shouldRetry = function (statusCode, requestOptions) { + var retryData = (requestOptions && requestOptions.retryContext) ? requestOptions.retryContext : {}; + retryData.retryInterval = this.retryInterval; + + return RetryPolicyFilter._shouldRetryOnError(statusCode, requestOptions); +}; + +/** +* Handles an operation with a linear retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. 
+*/ +LinearRetryPolicyFilter.prototype.handle = function (requestOptions, next) { + RetryPolicyFilter._handle(this, requestOptions, next); +}; + +module.exports = LinearRetryPolicyFilter; diff --git a/src/node_modules/azure-storage/lib/common/filters/retrypolicyfilter.js b/src/node_modules/azure-storage/lib/common/filters/retrypolicyfilter.js new file mode 100644 index 0000000..7e2a977 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/filters/retrypolicyfilter.js @@ -0,0 +1,234 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var azureutil = require('../util/util'); +var Constants = require('../util/constants'); +var StorageUtilities = require('../util/storageutilities'); +var extend = require('util')._extend; + +/** +* Creates a new RetryPolicyFilter instance. +* @class +* The RetryPolicyFilter allows you to retry operations, +* using a custom retry policy. Users are responsible to +* define the shouldRetry method. +* To apply a filter to service operations, use `withFilter` +* and specify the filter to be used when creating a service. +* @constructor +* @param {number} [retryCount=30000] The client retry count. +* @param {number} [retryInterval=3] The client retry interval, in milliseconds. 
+* +* @example +* var azure = require('azure-storage'); +* var retryPolicy = new azure.RetryPolicyFilter(); +* retryPolicy.retryCount = 3; +* retryPolicy.retryInterval = 3000; +* retryPolicy.shouldRetry = function(statusCode, retryContext) { +* +* }; +* var blobService = azure.createBlobService().withFilter(retryPolicy); +*/ +function RetryPolicyFilter(retryCount, retryInterval) { + this.retryCount = retryCount ? retryCount : RetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT; + this.retryInterval = retryInterval ? retryInterval : RetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL; +} + +/** +* Represents the default client retry interval, in milliseconds. +*/ +RetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL = 1000 * 30; + +/** +* Represents the default client retry count. +*/ +RetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT = 3; + +/** +* Handles an operation with a retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. +*/ +RetryPolicyFilter.prototype.handle = function (requestOptions, next) { + RetryPolicyFilter._handle(this, requestOptions, next); +}; + +/** +* Handles an operation with a retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. 
+*/ +RetryPolicyFilter._handle = function (self, requestOptions, next) { + + var retryRequestOptions = extend({}, requestOptions); + retryRequestOptions.retryInterval = 0; + + // Initialize retryContext because that will be passed to the shouldRetry method which users will implement + retryRequestOptions.retryContext = { + retryCount: 0, + error: null, + retryInterval: retryRequestOptions.retryInterval, + locationMode: retryRequestOptions.locationMode, + currentLocation: retryRequestOptions.currentLocation + }; + + var lastPrimaryAttempt; + var lastSecondaryAttempt; + var operation = function () { + // retry policies dont really do anything to the request options + // so move on to next + if (next) { + next(retryRequestOptions, function (returnObject, finalCallback, nextPostCallback) { + // Previous operation ended so update the retry data + if (returnObject.error) { + if (retryRequestOptions.retryContext.error) { + returnObject.error.innerError = retryRequestOptions.retryContext.error; + } + + retryRequestOptions.retryContext.error = returnObject.error; + } + + // If a request sent to the secondary location fails with 404 (Not Found), it is possible + // that the resource replication is not finished yet. So, in case of 404 only in the secondary + // location, the failure should still be retryable. + var secondaryNotFound = (retryRequestOptions.currentLocation === Constants.StorageLocation.SECONDARY) && ((returnObject.response && returnObject.response.statusCode === 404) || (returnObject.error && returnObject.error.code === 'ENOTFOUND')); + + var notExceedMaxRetryCount = retryRequestOptions.retryContext.retryCount ? retryRequestOptions.retryContext.retryCount <= self.retryCount : true; + var retryInfo = self.shouldRetry(secondaryNotFound ? 500 : (azureutil.objectIsNull(returnObject.response) ? 
306 : returnObject.response.statusCode), retryRequestOptions); + retryRequestOptions.retryContext.retryCount++; + + if (retryInfo.ignore) { + returnObject.error = null; + } + + // If the custom retry logic(shouldRetry) does not return a targetLocation, calculate based on the previous location and locationMode. + if(azureutil.objectIsNull(retryInfo.targetLocation)) { + retryInfo.targetLocation = azureutil.getNextLocation(retryRequestOptions.currentLocation, retryRequestOptions.locationMode); + } + + // If the custom retry logic(shouldRetry) does not return a retryInterval, try to set it to the value on the instance if it is available. Otherwise, the default(30000) will be used. + if(azureutil.objectIsNull(retryInfo.retryInterval)) { + retryInfo.retryInterval = self.retryInterval; + } + + // Only in the case of success from server but client side failure like MD5 or length mismatch, returnObject.retryable has a value(we explicitly set it to false). + // In this case, we should not retry the request. + // If the output stream already get sent to server and get error back, + // we should NOT retry within the SDK as the stream data is not valid anymore if we retry directly. 
+ if ( + !returnObject.outputStreamSent && returnObject.error && azureutil.objectIsNull(returnObject.retryable) && notExceedMaxRetryCount && + ( + (!azureutil.objectIsNull(returnObject.response) && retryInfo.retryable) || + ( + returnObject.error.code === 'ECONNREFUSED' || + returnObject.error.code === 'ETIMEDOUT' || + returnObject.error.code === 'ESOCKETTIMEDOUT' || + returnObject.error.code === 'ECONNRESET' || + returnObject.error.code === 'EAI_AGAIN' || + returnObject.error.message === 'XHR error' // stream-http XHR network error message in browsers + ) + ) + ) { + if (retryRequestOptions.currentLocation === Constants.StorageLocation.PRIMARY) { + lastPrimaryAttempt = returnObject.operationEndTime; + } else { + lastSecondaryAttempt = returnObject.operationEndTime; + } + + // Moreover, in case of 404 when trying the secondary location, instead of retrying on the + // secondary, further requests should be sent only to the primary location, as it most + // probably has a higher chance of succeeding there. + if (secondaryNotFound && (retryRequestOptions.locationMode !== StorageUtilities.LocationMode.SECONDARY_ONLY)) + { + retryInfo.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; + retryInfo.targetLocation = Constants.StorageLocation.PRIMARY; + } + + // Now is the time to calculate the exact retry interval. ShouldRetry call above already + // returned back how long two requests to the same location should be apart from each other. + // However, for the reasons explained above, the time spent between the last attempt to + // the target location and current time must be subtracted from the total retry interval + // that ShouldRetry returned. + var lastAttemptTime = retryInfo.targetLocation === Constants.StorageLocation.PRIMARY ? 
lastPrimaryAttempt : lastSecondaryAttempt; + if (!azureutil.objectIsNull(lastAttemptTime)) { + var sinceLastAttempt = new Date().getTime() - lastAttemptTime.getTime(); + if (sinceLastAttempt < 0) { + sinceLastAttempt = 0; + } + + retryRequestOptions.retryInterval = retryInfo.retryInterval - sinceLastAttempt; + } + else { + retryRequestOptions.retryInterval = 0; + } + + if(!azureutil.objectIsNull(retryInfo.locationMode)) { + retryRequestOptions.locationMode = retryInfo.locationMode; + } + + retryRequestOptions.currentLocation = retryInfo.targetLocation; + operation(); + } else { + if (nextPostCallback) { + nextPostCallback(returnObject); + } else if (finalCallback) { + finalCallback(returnObject); + } + } + }); + } + }; + + operation(); +}; + +RetryPolicyFilter._shouldRetryOnError = function (statusCode, requestOptions) { + var retryInfo = (requestOptions && requestOptions.retryContext) ? requestOptions.retryContext : {}; + + // Non-timeout Cases + if (statusCode >= 300 && statusCode != 408) { + // Always no retry on "not implemented" and "version not supported" + if (statusCode == 501 || statusCode == 505) { + retryInfo.retryable = false; + return retryInfo; + } + + // When absorbConditionalErrorsOnRetry is set (for append blob) + if (requestOptions && requestOptions.absorbConditionalErrorsOnRetry) { + if (statusCode == 412) { + // When appending block with precondition failure and their was a server error before, we ignore the error. 
+ if (retryInfo.lastServerError) { + retryInfo.ignore = true; + retryInfo.retryable = true; + } else { + retryInfo.retryable = false; + } + } else if (statusCode >= 500 && statusCode < 600) { + // Retry on the server error + retryInfo.retryable = true; + retryInfo.lastServerError = true; + } + } else if (statusCode < 500) { + // No retry on the client error + retryInfo.retryable = false; + } + } + + return retryInfo; +}; + +module.exports = RetryPolicyFilter; diff --git a/src/node_modules/azure-storage/lib/common/http/webresource.js b/src/node_modules/azure-storage/lib/common/http/webresource.js new file mode 100644 index 0000000..3c50153 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/http/webresource.js @@ -0,0 +1,324 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureutil = require('../util/util'); +var SR = require('../util/sr'); +var Constants = require('../util/constants'); +var errors = require('../errors/errors'); +var ArgumentError = errors.ArgumentError; +var HeaderConstants = Constants.HeaderConstants; +var HttpConstants = Constants.HttpConstants; +var HttpConstants = Constants.HttpConstants; +var HttpVerbs = HttpConstants.HttpVerbs; + +function encodeSpecialCharacters(path) { + return path.replace(/'/g, '%27'); +} + +/** +* Creates a new WebResource object. 
+* +* This class provides an abstraction over a REST call by being library / implementation agnostic and wrapping the necessary +* properties to initiate a request. +* +* @constructor +*/ +function WebResource() { + this.rawResponse = false; + this.queryString = {}; +} + +/** +* Creates a new put request web resource. +* +* @param {string} path The path for the put operation. +* @return {WebResource} A new webresource with a put operation for the given path. +*/ +WebResource.put = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.PUT; + return webResource; +}; + +/** +* Creates a new get request web resource. +* +* @param {string} path The path for the get operation. +* @return {WebResource} A new webresource with a get operation for the given path. +*/ +WebResource.get = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.GET; + return webResource; +}; + +/** +* Creates a new head request web resource. +* +* @param {string} path The path for the head operation. +* @return {WebResource} A new webresource with a head operation for the given path. +*/ +WebResource.head = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.HEAD; + return webResource; +}; + +/** +* Creates a new delete request web resource. +* +* @param {string} path The path for the delete operation. +* @return {WebResource} A new webresource with a delete operation for the given path. +*/ +WebResource.del = function (path) { + var webResource = new WebResource(); + webResource.path = path ? 
encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.DELETE; + return webResource; +}; + +/** +* Creates a new post request web resource. +* +* @param {string} path The path for the post operation. +* @return {WebResource} A new webresource with a post operation for the given path. +*/ +WebResource.post = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.POST; + return webResource; +}; + +/** +* Creates a new merge request web resource. +* +* @param {string} path The path for the merge operation. +* @return {WebResource} A new webresource with a merge operation for the given path. +*/ +WebResource.merge = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.MERGE; + return webResource; +}; + +/** +* Specifies a custom property in the web resource. +* +* @param {string} name The property name. +* @param {string} value The property value. +* @return {WebResource} The webresource. +*/ +WebResource.prototype.withProperty = function (name, value) { + if (!this.properties) { + this.properties = {}; + } + + this.properties[name] = value; + + return this; +}; + +/** +* Specifies if the response should be parsed or not. +* +* @param {bool} rawResponse true if the response should not be parsed; false otherwise. +* @return {WebResource} The webresource. +*/ +WebResource.prototype.withRawResponse = function (rawResponse) { + this.rawResponse = rawResponse; + + if (azureutil.objectIsNull(this.rawResponse)) { + this.rawResponse = true; + } + + return this; +}; + +WebResource.prototype.withHeadersOnly = function (headersOnly) { + if (headersOnly !== undefined) { + this.headersOnly = headersOnly; + } else { + this.headersOnly = true; + } + + return this; +}; + +/** +* Adds an optional query string parameter. 
+* +* @param {Object} name The name of the query string parameter. +* @param {Object} value The value of the query string parameter. +* @param {Object} defaultValue The default value for the query string parameter to be used if no value is passed. +* @return {Object} The web resource. +*/ +WebResource.prototype.withQueryOption = function (name, value, defaultValue) { + if (!azureutil.objectIsNull(value)) { + this.queryString[name] = value; + } else if (defaultValue) { + this.queryString[name] = defaultValue; + } + + return this; +}; + +/** +* Adds optional query string parameters. +* +* Additional arguments will be the needles to search in the haystack. +* +* @param {Object} object The haystack of query string parameters. +* @return {Object} The web resource. +*/ +WebResource.prototype.withQueryOptions = function (object) { + if (object) { + for (var i = 1; i < arguments.length; i++) { + if (object[arguments[i]]) { + this.withQueryOption(arguments[i], object[arguments[i]]); + } + } + } + + return this; +}; + +/** +* Adds an optional header parameter. +* +* @param {Object} name The name of the header parameter. +* @param {Object} value The value of the header parameter. +* @return {Object} The web resource. +*/ +WebResource.prototype.withHeader = function (name, value) { + if (!this.headers) { + this.headers = {}; + } + + if (!azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(value)) { + value = value instanceof Date ? value.toUTCString() : value; + + this.headers[name] = value; + } + + return this; +}; + +/** +* Adds an optional body. +* +* @param {Object} body The request body. +* @return {Object} The web resource. +*/ +WebResource.prototype.withBody = function (body) { + this.body = body; + return this; +}; + +/** +* Adds optional query string parameters. +* +* Additional arguments will be the needles to search in the haystack. +* +* @param {Object} object The haystack of headers. +* @return {Object} The web resource. 
+*/ +WebResource.prototype.withHeaders = function (object) { + if (object) { + for (var i = 1; i < arguments.length; i++) { + if (object[arguments[i]]) { + this.withHeader(arguments[i], object[arguments[i]]); + } + } + } + + return this; +}; + +WebResource.prototype.addOptionalMetadataHeaders = function (metadata) { + var self = this; + + if (metadata) { + Object.keys(metadata).forEach(function (metadataKey) { + if (azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(metadataKey)) { + throw new ArgumentError('metadata', SR.METADATA_KEY_INVALID); + } + + var value = metadata[metadataKey]; + if (azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(value)) { + throw new ArgumentError('metadata', SR.METADATA_VALUE_INVALID); + } + + var metadataHeaderName = HeaderConstants.PREFIX_FOR_STORAGE_METADATA + metadataKey; + var existingMetadataHeaderName = ''; + var headers = self.headers ? self.headers : {}; + if (Object.keys(headers).some(function (headerName) { + existingMetadataHeaderName = headerName; + return headerName.toString().toLowerCase() === metadataHeaderName.toLowerCase(); + })) { + self.withHeader(existingMetadataHeaderName, self.headers[existingMetadataHeaderName] + ',' + value); + } else { + self.withHeader(metadataHeaderName, value); + } + }); + } + + return this; +}; + +/** +* Determines if a status code corresponds to a valid response according to the WebResource's expected status codes. +* +* @param {int} statusCode The response status code. +* @return true if the response is valid; false otherwise. +*/ +WebResource.validResponse = function (statusCode) { + if (statusCode >= 200 && statusCode < 300) { + return true; + } + + return false; +}; + +function isMethodWithBody(verb) { + return verb === HttpVerbs.PUT || + verb === HttpVerbs.POST || + verb === HttpVerbs.MERGE; +} + +/** +* Hook up the given input stream to a destination output stream if the WebResource method +* requires a request body and a body is not already set. 
+* +* @param {Stream} inputStream the stream to pipe from +* @param {Stream} outputStream the stream to pipe to +* +* @return destStream +*/ +WebResource.prototype.pipeInput = function(inputStream, destStream) { + if (isMethodWithBody(this.method) && !this.hasOwnProperty('body')) { + inputStream.pipe(destStream); + } + + return destStream; +}; + +module.exports = WebResource; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/md5-wrapper/md5.browser.js b/src/node_modules/azure-storage/lib/common/md5-wrapper/md5.browser.js new file mode 100644 index 0000000..d0690f3 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/md5-wrapper/md5.browser.js @@ -0,0 +1,26 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var MD5 = require('md5.js'); + +var Md5Wrapper = function () { +}; + +Md5Wrapper.prototype.createMd5Hash = function() { + return new MD5(); +}; + +module.exports = Md5Wrapper; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/md5-wrapper/md5.node.js b/src/node_modules/azure-storage/lib/common/md5-wrapper/md5.node.js new file mode 100644 index 0000000..bf4e5f6 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/md5-wrapper/md5.node.js @@ -0,0 +1,26 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var crypto = require('crypto'); + +var Md5Wrapper = function () { +}; + +Md5Wrapper.prototype.createMd5Hash = function() { + return crypto.createHash('md5'); +}; + +module.exports = Md5Wrapper; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/md5-wrapper/package.json b/src/node_modules/azure-storage/lib/common/md5-wrapper/package.json new file mode 100644 index 0000000..17de3eb --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/md5-wrapper/package.json @@ -0,0 +1,7 @@ +{ + "name": "azure-storage-md5-wrapper", + "author": "Microsoft Corporation", + "license": "Apache-2.0", + "main": "md5.node.js", + "browser": "md5.browser.js" +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/models/accountpropertiesresult.js b/src/node_modules/azure-storage/lib/common/models/accountpropertiesresult.js new file mode 100644 index 0000000..2e95092 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/models/accountpropertiesresult.js @@ -0,0 +1,33 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var Constants = require('../util/constants'); +var HeaderConstants = Constants.HeaderConstants; + +exports.parse = function (headers) { + var accountPropertiesResult = {}; + + if (headers[HeaderConstants.SKU_NAME]) { + accountPropertiesResult.SkuName = headers[HeaderConstants.SKU_NAME]; + } + + if (headers[HeaderConstants.ACCOUNT_KIND]) { + accountPropertiesResult.AccountKind = headers[HeaderConstants.ACCOUNT_KIND]; + } + + return accountPropertiesResult; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/models/aclresult.js b/src/node_modules/azure-storage/lib/common/models/aclresult.js new file mode 100644 index 0000000..d6d2a67 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/models/aclresult.js @@ -0,0 +1,121 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var _ = require('underscore'); +var xmlbuilder = require('xmlbuilder'); + +var azureutil = require('../util/util'); +var ISO8061Date = require('../util/iso8061date'); +var Constants = require('../util/constants'); +var AclConstants = Constants.AclConstants; + +exports = module.exports; + +/** +* Builds an XML representation for container acl permissions. +* +* @param {Object.} entity The signed identifiers. +* @return {string} The XML container acl permissions. +*/ +exports.serialize = function (signedIdentifiersJs) { + var doc = xmlbuilder.create(AclConstants.SIGNED_IDENTIFIERS_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + var keys = Object.keys(signedIdentifiersJs); + if (keys.length > 0) { + keys.forEach(function (key) { + var accessPolicy = signedIdentifiersJs[key]; + doc = doc + .ele(AclConstants.SIGNED_IDENTIFIER_ELEMENT) + .ele(AclConstants.ID) + .txt(key) + .up() + .ele(AclConstants.ACCESS_POLICY); + + if (accessPolicy.Start) { + var startIsoString = accessPolicy.Start; + if (!_.isDate(startIsoString)) { + startIsoString = new Date(startIsoString); + } + + // Convert to expected ISO 8061 date format + startIsoString = ISO8061Date.format(startIsoString); + + doc = doc + .ele(AclConstants.START) + .txt(startIsoString) + .up(); + } + + if (accessPolicy.Expiry) { + var expiryIsoString = accessPolicy.Expiry; + if (!_.isDate(expiryIsoString)) { + expiryIsoString = new Date(expiryIsoString); + } + + // Convert to expected ISO 8061 date format + expiryIsoString = ISO8061Date.format(expiryIsoString); + + doc = doc + .ele(AclConstants.EXPIRY) + .txt(expiryIsoString) + .up(); + } + + if (accessPolicy.Permissions) { + doc = doc + .ele(AclConstants.PERMISSION) + .txt(accessPolicy.Permissions) + .up(); + } + + doc = doc.up().up(); + }); + } + return doc.doc().toString(); +}; + +exports.parse = function (signedIdentifiersXml) { + var signedIdentifiers = {}; + + signedIdentifiersXml = azureutil.tryGetValueChain(signedIdentifiersXml, [ 'SignedIdentifiers', 
'SignedIdentifier' ]); + if (signedIdentifiersXml) { + if (!_.isArray(signedIdentifiersXml)) { + signedIdentifiersXml = [ signedIdentifiersXml ]; + } + + signedIdentifiersXml.forEach(function (signedIdentifier) { + var accessPolicy = {}; + if (signedIdentifier.AccessPolicy) { + if (signedIdentifier.AccessPolicy.Start) { + accessPolicy.Start = ISO8061Date.parse(signedIdentifier.AccessPolicy.Start); + } + + if (signedIdentifier.AccessPolicy.Expiry) { + accessPolicy.Expiry = ISO8061Date.parse(signedIdentifier.AccessPolicy.Expiry); + } + + if (signedIdentifier.AccessPolicy.Permission) { + accessPolicy.Permissions = signedIdentifier.AccessPolicy.Permission; + } + } + + signedIdentifiers[signedIdentifier.Id] = accessPolicy; + }); + } + + return signedIdentifiers; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/models/servicepropertiesresult.js b/src/node_modules/azure-storage/lib/common/models/servicepropertiesresult.js new file mode 100644 index 0000000..80629a5 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/models/servicepropertiesresult.js @@ -0,0 +1,486 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var _ = require('underscore'); +var xmlbuilder = require('xmlbuilder'); + +var Constants = require('../util/constants'); +var ServicePropertiesConstants = Constants.ServicePropertiesConstants; + +exports = module.exports; + +function serializeRetentionPolicy(doc, policy) { + if (policy !== null) { + if (typeof policy === 'undefined') { + policy = {}; + } + + doc = doc.ele(ServicePropertiesConstants.RETENTION_POLICY_ELEMENT); + if (typeof policy.Enabled !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(policy.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(false) + .up(); + } + + if (typeof policy.Days !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(policy.Days) + .up(); + } else if (policy.Enabled === true) { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(1) + .up(); + } + + doc = doc.up(); + } +} + +function serializeDeleteRetentionPolicy(doc, policy) { + if (policy !== null) { + if (typeof policy === 'undefined') { + policy = {}; + } + + if (typeof policy.Enabled !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(policy.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(false) + .up(); + } + + if (typeof policy.Days !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(policy.Days) + .up(); + } else if (policy.Enabled === true) { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(1) + .up(); + } + + doc = doc.up(); + } +} + +function serializeStaticWebsite(doc, staticWebsite) { + if (staticWebsite !== null) { + if (typeof staticWebsite === 'undefined') { + staticWebsite = {}; + } + + if (typeof staticWebsite.Enabled !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(staticWebsite.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + 
.txt(false) + .up(); + } + + if (typeof staticWebsite.IndexDocument !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_INDEX_DOCUMENT_ELEMENT) + .txt(staticWebsite.IndexDocument) + .up(); + } + + if (typeof staticWebsite.ErrorDocument404Path !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_ERROR_DOCUMENT_404_PATH_ELEMENT) + .txt(staticWebsite.ErrorDocument404Path) + .up(); + } + + doc = doc.up(); + } +} + +function serializeLogging(doc, logging) { + if (typeof logging.Version !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(logging.Version) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(ServicePropertiesConstants.DEFAULT_ANALYTICS_VERSION) + .up(); + } + + if (typeof logging.Delete !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DELETE_ELEMENT) + .txt(logging.Delete) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.DELETE_ELEMENT) + .txt(false) + .up(); + } + + if (typeof logging.Read !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.READ_ELEMENT) + .txt(logging.Read) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.READ_ELEMENT) + .txt(false) + .up(); + } + + if (typeof logging.Write !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.WRITE_ELEMENT) + .txt(logging.Write) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.WRITE_ELEMENT) + .txt(false) + .up(); + } + + serializeRetentionPolicy(doc, logging.RetentionPolicy); + + doc = doc.up(); +} + +function serializeMetrics(doc, metrics) { + if (typeof metrics.Version !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(metrics.Version) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(ServicePropertiesConstants.DEFAULT_ANALYTICS_VERSION) + .up(); + } + + if (typeof metrics.Enabled !== 'undefined') { + doc = 
doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(metrics.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(false) + .up(); + } + + if (metrics.Enabled) { + if (typeof metrics.IncludeAPIs !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.INCLUDE_APIS_ELEMENT) + .txt(metrics.IncludeAPIs) + .up(); + } else if (metrics.Enabled === true) { + doc = doc.ele(ServicePropertiesConstants.INCLUDE_APIS_ELEMENT) + .txt(false) + .up(); + } + } + serializeRetentionPolicy(doc, metrics.RetentionPolicy); +} + +function serializeCorsRules(doc, rules) { + if (typeof rules !== 'undefined' && rules !== null && _.isArray(rules)) { + rules.forEach(function (rule) { + doc = doc.ele(ServicePropertiesConstants.CORS_RULE_ELEMENT); + + if (typeof rule.AllowedMethods !== 'undefined' && _.isArray(rule.AllowedMethods)) { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_METHODS_ELEMENT) + .txt(rule.AllowedMethods.join(',')) + .up(); + } + + if (typeof rule.AllowedOrigins !== 'undefined' && _.isArray(rule.AllowedOrigins)) { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_ORIGINS_ELEMENT) + .txt(rule.AllowedOrigins.join(',')) + .up(); + } + + if (typeof rule.AllowedHeaders !== 'undefined' && _.isArray(rule.AllowedHeaders)) { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_HEADERS_ELEMENT) + .txt(rule.AllowedHeaders.join(',')) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_HEADERS_ELEMENT) + .txt('') + .up(); + } + + if (typeof rule.ExposedHeaders !== 'undefined' && _.isArray(rule.ExposedHeaders)) { + doc = doc.ele(ServicePropertiesConstants.EXPOSED_HEADERS_ELEMENT) + .txt(rule.ExposedHeaders.join(',')) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.EXPOSED_HEADERS_ELEMENT) + .txt('') + .up(); + } + + if (typeof rule.MaxAgeInSeconds !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.MAX_AGE_IN_SECONDS_ELEMENT) + .txt(rule.MaxAgeInSeconds) + .up(); + } else { + doc = 
doc.ele(ServicePropertiesConstants.MAX_AGE_IN_SECONDS_ELEMENT) + .txt('0') + .up(); + } + + doc = doc.up(); + }); + } +} + +exports.serialize = function (servicePropertiesJs) { + var doc = xmlbuilder.create(ServicePropertiesConstants.STORAGE_SERVICE_PROPERTIES_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + if (servicePropertiesJs.Logging) { + doc = doc.ele(ServicePropertiesConstants.LOGGING_ELEMENT); + serializeLogging(doc, servicePropertiesJs.Logging); + doc = doc.up(); + } + + if (servicePropertiesJs.HourMetrics) { + doc = doc.ele(ServicePropertiesConstants.HOUR_METRICS_ELEMENT); + serializeMetrics(doc, servicePropertiesJs.HourMetrics); + doc = doc.up(); + } + + if (servicePropertiesJs.MinuteMetrics) { + doc = doc.ele(ServicePropertiesConstants.MINUTE_METRICS_ELEMENT); + serializeMetrics(doc, servicePropertiesJs.MinuteMetrics); + doc = doc.up(); + } + + if (servicePropertiesJs.Cors) { + doc = doc.ele(ServicePropertiesConstants.CORS_ELEMENT); + serializeCorsRules(doc, servicePropertiesJs.Cors.CorsRule); + doc = doc.up(); + } + + if (servicePropertiesJs.DefaultServiceVersion) { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_SERVICE_VERSION_ELEMENT) + .txt(servicePropertiesJs.DefaultServiceVersion) + .up(); + } + + if (servicePropertiesJs.DeleteRetentionPolicy) { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_DELETE_RETENTION_POLICY_ELEMENT); + serializeDeleteRetentionPolicy(doc, servicePropertiesJs.DeleteRetentionPolicy); + doc = doc.up(); + } + + if (servicePropertiesJs.StaticWebsite) { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_STATIC_WEBSITE_ELEMENT); + serializeStaticWebsite(doc, servicePropertiesJs.StaticWebsite); + doc = doc.up(); + } + + return doc.doc().toString(); +}; + +function parseRetentionPolicy(policyXml) { + var policy = {}; + + if (typeof policyXml.Enabled !== 'undefined') { + policy.Enabled = policyXml.Enabled === 'true'; + } + + if (typeof policyXml.Days !== 'undefined') { + policy.Days = parseInt(policyXml.Days, 10); + } + 
+ return policy; +} + +function parseLogging(loggingXml) { + var logging = {}; + + if (typeof loggingXml.Version !== 'undefined') { + logging.Version = loggingXml.Version; + } + + if (typeof loggingXml.Delete !== 'undefined') { + logging.Delete = loggingXml.Delete === 'true'; + } + + if (typeof loggingXml.Read !== 'undefined') { + logging.Read = loggingXml.Read === 'true'; + } + + if (typeof loggingXml.Write !== 'undefined') { + logging.Write = loggingXml.Write === 'true'; + } + + if (typeof loggingXml.RetentionPolicy !== 'undefined') { + logging.RetentionPolicy = parseRetentionPolicy(loggingXml.RetentionPolicy); + } + + return logging; +} + +function parseMetrics(metricsXml) { + var metrics = {}; + + if (typeof metricsXml.Version !== 'undefined') { + metrics.Version = metricsXml.Version; + } + + if (typeof metricsXml.Enabled !== 'undefined') { + metrics.Enabled = metricsXml.Enabled === 'true'; + } + + if (typeof metricsXml.IncludeAPIs !== 'undefined') { + metrics.IncludeAPIs = metricsXml.IncludeAPIs === 'true'; + } + + if (typeof metricsXml.RetentionPolicy !== 'undefined') { + metrics.RetentionPolicy = parseRetentionPolicy(metricsXml.RetentionPolicy); + } + + return metrics; +} + +function parseCors(corsXml) { + var cors = {}; + + if (typeof corsXml.CorsRule !== 'undefined') { + var rulesXml = corsXml.CorsRule; + if (!_.isArray(rulesXml)) { + rulesXml = [rulesXml]; + } + + cors.CorsRule = []; + rulesXml.forEach(function (ruleXml) { + var rule = {}; + + if (typeof ruleXml.AllowedMethods !== 'undefined') { + if (ruleXml.AllowedMethods !== '') { + rule.AllowedMethods = ruleXml.AllowedMethods.split(','); + } + else { + rule.AllowedMethods = []; + } + } + + if (typeof ruleXml.AllowedOrigins !== 'undefined') { + if (ruleXml.AllowedOrigins !== '') { + rule.AllowedOrigins = ruleXml.AllowedOrigins.split(','); + } + else { + rule.AllowedOrigins = []; + } + } + + if (typeof ruleXml.AllowedHeaders !== 'undefined') { + if (ruleXml.AllowedHeaders !== '') { + rule.AllowedHeaders 
= ruleXml.AllowedHeaders.split(','); + } + else { + rule.AllowedHeaders = []; + } + } + + if (typeof ruleXml.ExposedHeaders !== 'undefined') { + if (ruleXml.ExposedHeaders !== '') { + rule.ExposedHeaders = ruleXml.ExposedHeaders.split(','); + } + else { + rule.ExposedHeaders = []; + } + } + + if (typeof ruleXml.MaxAgeInSeconds !== 'undefined') { + rule.MaxAgeInSeconds = parseInt(ruleXml.MaxAgeInSeconds, 10); + } + + cors.CorsRule.push(rule); + }); + } + + return cors; +} + +function parseDeleteRetentionPolicy(deleteRetentionPolicyXml) { + var deleteRetentionPolicy = {}; + + if (typeof deleteRetentionPolicyXml.Enabled !== 'undefined') { + deleteRetentionPolicy.Enabled = deleteRetentionPolicyXml.Enabled === 'true'; + } + + if (typeof deleteRetentionPolicyXml.Days !== 'undefined') { + deleteRetentionPolicy.Days = parseInt(deleteRetentionPolicyXml.Days); + } + + return deleteRetentionPolicy; +} + +function parseStaticWebsite(staticWebsiteXml) { + var staticWebsite = {}; + + if (typeof staticWebsiteXml.Enabled !== 'undefined') { + staticWebsite.Enabled = staticWebsiteXml.Enabled === 'true'; + } + + if (typeof staticWebsiteXml.IndexDocument !== 'undefined') { + staticWebsite.IndexDocument = staticWebsiteXml.IndexDocument; + } + + if (typeof staticWebsiteXml.ErrorDocument404Path !== 'undefined') { + staticWebsite.ErrorDocument404Path = staticWebsiteXml.ErrorDocument404Path; + } + + return staticWebsite; +} + +exports.parse = function (servicePropertiesXml) { + var serviceProperties = {}; + + if (typeof servicePropertiesXml.Logging !== 'undefined') { + serviceProperties.Logging = parseLogging(servicePropertiesXml.Logging); + } + + if (typeof servicePropertiesXml.HourMetrics !== 'undefined') { + serviceProperties.HourMetrics = parseMetrics(servicePropertiesXml.HourMetrics); + } + + if (typeof servicePropertiesXml.MinuteMetrics !== 'undefined') { + serviceProperties.MinuteMetrics = parseMetrics(servicePropertiesXml.MinuteMetrics); + } + + if (typeof servicePropertiesXml.Cors 
!== 'undefined') { + serviceProperties.Cors = parseCors(servicePropertiesXml.Cors); + } + + if (typeof servicePropertiesXml.DefaultServiceVersion !== 'undefined') { + serviceProperties.DefaultServiceVersion = servicePropertiesXml.DefaultServiceVersion; + } + + if (typeof servicePropertiesXml.DeleteRetentionPolicy !== 'undefined') { + serviceProperties.DeleteRetentionPolicy = parseDeleteRetentionPolicy(servicePropertiesXml.DeleteRetentionPolicy); + } + + if (typeof servicePropertiesXml.StaticWebsite !== 'undefined') { + serviceProperties.StaticWebsite = parseStaticWebsite(servicePropertiesXml.StaticWebsite); + } + + return serviceProperties; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/models/servicestatsparser.js b/src/node_modules/azure-storage/lib/common/models/servicestatsparser.js new file mode 100644 index 0000000..3ebf342 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/models/servicestatsparser.js @@ -0,0 +1,34 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +exports = module.exports; + +exports.parse = function (serviceStatsXml) { + var serviceStats = {}; + + if (typeof serviceStatsXml.GeoReplication !== 'undefined') { + serviceStats.GeoReplication = {}; + + if (typeof serviceStatsXml.GeoReplication.Status !== 'undefined') { + serviceStats.GeoReplication.Status = serviceStatsXml.GeoReplication.Status; + } + + if (typeof serviceStatsXml.GeoReplication.LastSyncTime !== 'undefined' && serviceStatsXml.GeoReplication.LastSyncTime !== '') { + serviceStats.GeoReplication.LastSyncTime = new Date(serviceStatsXml.GeoReplication.LastSyncTime); + } + } + + return serviceStats; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/models/tokencredential.js b/src/node_modules/azure-storage/lib/common/models/tokencredential.js new file mode 100644 index 0000000..6443798 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/models/tokencredential.js @@ -0,0 +1,53 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* Creates a new TokenCredential object. +* @class +* The TokenCredential class is used to store the access token string. +* +* @constructor +* @param {string} token The access token, such as an OAuth access token in string type. 
+* +* @example +* var azure = require('azure-storage'); +* var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); +* var blobService = azure.createBlobServiceWithTokenCredential('https://account.blob.core.windows.net', tokenCredential); +* tokenCredential.set('updatedOAuthAccessToken'); +*/ +function TokenCredential (token) { + this.token = token; +} + +/** +* Get current access token. +* +* @return {string} The current access token in string type. +*/ +TokenCredential.prototype.get = function () { + return this.token; +}; + +/** +* Renew the access token. +* +* @param {string} token The new access token in string. +*/ +TokenCredential.prototype.set = function (token) { + this.token = token; +}; + +module.exports = TokenCredential; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/request-wrapper/package.json b/src/node_modules/azure-storage/lib/common/request-wrapper/package.json new file mode 100644 index 0000000..00913d0 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/request-wrapper/package.json @@ -0,0 +1,7 @@ +{ + "name": "azure-storage-request-wrapper", + "author": "Microsoft Corporation", + "license": "Apache-2.0", + "main": "request.node.js", + "browser": "request.browser.js" +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/request-wrapper/request.browser.js b/src/node_modules/azure-storage/lib/common/request-wrapper/request.browser.js new file mode 100644 index 0000000..b71aa8f --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/request-wrapper/request.browser.js @@ -0,0 +1,258 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var azureCommon = require('../common.browser'); +var Duplex = require('stream').Duplex; +var extend = require('extend'); +var http = require('http'); +var https = require('https'); +var url = require('url'); +var util = require('util'); + +/** +* Creates a new Request object. +* +* @constructor +* @param {object} options The options for a Request object. +* @param {function} callback Callback +*/ +function Request(options, callback) { + Duplex.call(this); + + this._init(options, callback); + this._send(); +} + +util.inherits(Request, Duplex); + +Request.prototype._init = function (options, callback) { + this.callback = callback; + + var nonReservedProperties = azureCommon.util.filterOutNonReservedProperties(this, options); + extend(this, nonReservedProperties); + + this.agent = this.agent || false; + this.timeout = this.timeout || Math.pow(2, 32) * 1000; + + this._initUri(); + this._initHeaders(); +}; + +Request.prototype._initUri = function () { + if (!this.uri) { + return this.emit('error', new Error('options.uri is a required argument')); + } + + if (typeof this.uri === 'string') { + this.uri = url.parse(this.uri); + } + + if (!this.uri.href) { + this.uri.href = url.format(this.uri); + } +}; + +Request.prototype._initHeaders = function () { + this.headers = this.headers || {}; + this.headers['content-length'] = this.headers['content-length'] || 0; +}; + +Request.prototype._send = function () { + this._sent = true; + + var protocol = this.uri.protocol || ''; + var iface = (protocol === 'https:' ? 
https : http); + + var options = { + scheme: protocol.replace(/:$/, ''), + method: this.method, + host: this.uri.hostname, + port: Number(this.uri.port) || (protocol === 'https:' ? 443 : 80), + path: this.uri.path, + agent: this.agent, + headers: this.headers, + withCredentials: this.withCredentials, + localAddress: this.localAddress, + mode: this.mode + }; + + if (protocol === 'https:') { + options.pfx = this.pfx; + options.key = this.key; + options.cert = this.cert; + options.ca = this.ca; + options.ciphers = this.ciphers; + options.rejectUnauthorized = this.rejectUnauthorized; + options.secureProtocol = this.secureProtocol; + } + + var httpRequest = iface.request(options); + if (this.timeout && httpRequest.setTimeout) { + httpRequest.setTimeout(this.timeout); + } + + this.httpRequest = httpRequest; + this.emit('request', httpRequest); + + this._sendBody(); + this._listenHttpResponse(); + this._listenHttpError(); + + return httpRequest; +}; + +Request.prototype._sendBody = function () { + if (this.body) { + this.httpRequest.write(this.body); + this.httpRequest.end(); + return; + } + + if (this.headers['content-length'] == '0') { + this.httpRequest.end(); + return; + } +}; + +Request.prototype._listenHttpError = function() { + var self = this; + + self.httpRequest.on('error', function(error) { + self.emit('error', error); + }); + + self.on('error', function(error) { + if (self.callback) { + self.callback(error); + } + }); +}; + +Request.prototype._listenHttpResponse = function () { + var self = this; + + self.httpRequest.on('response', function (response) { + var buffers = []; + var bufferLength = 0; + + self.response = response; + + response.on('data', function (chunk) { + self.push(chunk); + buffers.push(chunk); + bufferLength += chunk.length; + }); + + response.on('error', function (error) { + self.emit('error', error); + }); + + response.on('end', function () { + self.push(null); + + response.body = ''; + if (bufferLength > 0) { + response.body = 
Buffer.concat(buffers, bufferLength); + } + + if (self.encoding !== null) { + response.body = response.body.toString(self.encoding); + } + + if (self.callback) { + self.callback(null, response); + } + }); + + self.emit('response', response); + }); +}; + +/** +* Set a Request header. +* +* @param {string} key The user provided header key. +* @param {string} value The valid header value. +*/ +Request.prototype.setHeader = function (key, value) { + if (this._sent) { + throw new Error('Request already sent'); + } + + this.headers[key] = value; + return this; +}; + +/** +* Set a Request URI. +* +* @param {string} uri The user provided uri. +*/ +Request.prototype.setLocation = function (uri) { + this.uri = uri; + return this; +}; + +Request.prototype.end = function (chunk) { + if (chunk) { + this.httpRequest.write(chunk); + } + + this.httpRequest.end(); +}; + +Request.prototype._write = function (chunk, encoding, callback) { + this.httpRequest.write(chunk); + callback(); +}; + +Request.prototype._read = function () { +}; + +/** +* Create a Request object +* @ignore +* +* @param {object} options Options +* @param {function} callback Callback +* @return {Request} The created request object. 
+*/ +function req(optionsOrCallback, callback) { + var reqDefaults = req.defaults(); + return reqDefaults(optionsOrCallback, callback); +} + +/** +* Create a Request creator with default options +* @ignore +* +* @param {object} defaultOptions Default options +* @return {function} +*/ +req.defaults = function (defaultOptions) { + return function (optionsOrCallback, callback) { + var options; + azureCommon.util.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + var nonReservedProperties = azureCommon.util.filterOutNonReservedProperties(options, defaultOptions); + extend(options, nonReservedProperties); + + var request = new Request(options, callback); + return request; + }; +}; + +module.exports = req; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/request-wrapper/request.node.js b/src/node_modules/azure-storage/lib/common/request-wrapper/request.node.js new file mode 100644 index 0000000..63e8845 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/request-wrapper/request.node.js @@ -0,0 +1,17 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
//

// Node flavor of the request wrapper: defer to the 'request' package rather
// than the hand-rolled browser shim in request.browser.js.
module.exports = require('request');

// ---- file: src/node_modules/azure-storage/lib/common/services/servicesettings.js ----
//
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//

var util = require('util');

// Project-local helpers: general utilities, error helpers (captureStackTrace),
// localized string resources, and shared constants.
var azureUtil = require('../util/util');
var errors = require('../errors/errors');
var SR = require('../util/sr');
var Constants = require('../util/constants');

exports = module.exports;

/**
* The default protocol.
*/
exports.DEFAULT_PROTOCOL = Constants.HTTPS;

// Error type raised when a connection string or settings object matches no
// known configuration shape. `constr` lets a caller trim its own frame from
// the captured stack; it defaults to this constructor.
var NoMatchError = function (msg, constr) {
  errors.captureStackTrace(this, constr || this);
  this.message = msg || 'Error';
};

util.inherits(NoMatchError, Error);
NoMatchError.prototype.name = 'NoMatchError';

exports.NoMatchError = NoMatchError;

/**
* Throws an exception if the connection string format does not match any of the
* available formats.
*
* @param {string} connectionString The invalid formatted connection string.
+* @return none +*/ +exports.noMatchConnectionString = function (connectionString) { + throw new NoMatchError('The provided connection string "' + connectionString + '" does not have complete configuration settings.'); +}; + +/** +* Throws an exception if the settings dont match any of the +* available formats. +* +* @param {object} settings The invalid settings. +* @return none +*/ +exports.noMatchSettings = function (settings) { + throw new NoMatchError('The provided settings ' + JSON.stringify(settings) + ' are not complete.'); +}; + +/** +* Parses the connection string and then validate that the parsed keys belong to +* the validSettingKeys +* +* @param {string} connectionString The user provided connection string. +* @param {array} validKeys The valid keys. +* @return {array} The tokenized connection string keys. +*/ +exports.parseAndValidateKeys = function (connectionString, validKeys) { + var parsedConnectionString = { }; + + // parse key/value pairs from connection string + var pairs = connectionString.split(';'); + for (var m = 0; m < pairs.length; m++) { + if (pairs[m].length === 0) { + continue; + } + + var equalDex = pairs[m].indexOf('='); + if (equalDex < 0) { + throw new SyntaxError(SR.INVALID_CONNECTION_STRING); + } else if (equalDex === 0) { + // empty key name. + throw new SyntaxError(SR.INVALID_CONNECTION_STRING_EMPTY_KEY); + } + + var key = pairs[m].substring(0, equalDex); + + // assure that all given keys are valid. 
+ if (!azureUtil.inArrayInsensitive(key, validKeys)) { + throw new SyntaxError(util.format(SR.INVALID_CONNECTION_STRING_BAD_KEY, key)); + } + + var value = pairs[m].substring(equalDex + 1); + + if(typeof parsedConnectionString[key] === 'undefined'){ + parsedConnectionString[key] = value; + } else { + // duplicate key name + throw new SyntaxError(util.format(SR.INVALID_CONNECTION_STRING_DUPLICATE_KEY, key)); + } + } + + return parsedConnectionString; +}; + +/** +* Creates an anonymous function that acts as predicate to perform a validation. +* +* @param array {requirements} The array of conditions to satisfy. +* @param boolean {isRequired} Either these conditions are all required or all +* optional. +* @param boolean {atLeastOne} Indicates that at least one requirement must +* succeed. +* @return {function} +*/ +exports.getValidator = function (requirements, isRequired, atLeastOne) { + return function (userSettings) { + var oneFound = false; + var result = { }; + + for (var key in userSettings) { + if (userSettings.hasOwnProperty(key)) { + result[key.toLowerCase()] = userSettings[key]; + } + } + + for (var requirement in requirements) { + if (requirements.hasOwnProperty(requirement)) { + var settingName = requirements[requirement].SettingName.toLowerCase(); + + // Check if the setting name exists in the provided user settings. + if (result[settingName]) { + // Check if the provided user setting value is valid. + var validationFunc = requirements[requirement].SettingConstraint; + var isValid = validationFunc(result[settingName]); + + if (isValid) { + // Remove the setting as indicator for successful validation. + delete result[settingName]; + oneFound = true; + } + } else if (isRequired) { + // If required then fail because the setting does not exist + return null; + } + } + } + + if (atLeastOne) { + // At least one requirement must succeed, otherwise fail. + return oneFound ? 
result : null; + } else { + return result; + } + }; +}; + +/** +* Creates a setting value condition that validates it is one of the +* passed valid values. +* +* @param {string} name The setting key name. +* @return {array} +*/ +exports.setting = function (name) { + var validValues = Array.prototype.slice.call(arguments, 1, arguments.length); + + var predicate = function (settingValue) { + var validValuesString = JSON.stringify(validValues); + if (validValues.length === 0) { + // No restrictions, succeed. + return true; + } + + // Check to find if the settingValue is valid or not. + for (var index = 0; index < validValues.length; index++) { + if (settingValue.toString() == validValues[index].toString()) { + // SettingValue is found in valid values set, succeed. + return true; + } + } + + // settingValue is missing in valid values set, fail. + throw new RangeError('The provided config value ' + settingValue + ' does not belong to the valid values subset:\n' + validValuesString); + }; + + return exports.settingWithFunc(name, predicate); +}; + +/** +* Creates an "at lease one" predicate for the provided list of requirements. +* +* @return callable +*/ +exports.atLeastOne = function () { + var allSettings = arguments; + return exports.getValidator(allSettings, false, true); +}; + +/** +* Creates an optional predicate for the provided list of requirements. +* +* @return {function} +*/ +exports.optional = function () { + var optionalSettings = arguments; + return exports.getValidator(optionalSettings, false, false); +}; + +/** +* Creates an required predicate for the provided list of requirements. +* +* @return {function} +*/ +exports.allRequired = function () { + var requiredSettings = arguments; + return exports.getValidator(requiredSettings, true, false); +}; + +/** +* Creates a setting value condition using the passed predicate. +* +* @param {string} name The setting key name. +* @param {function} predicate The setting value predicate. 
+* @return {array} +*/ +exports.settingWithFunc = function (name, predicate) { + var requirement = {}; + requirement.SettingName = name; + requirement.SettingConstraint = predicate; + + return requirement; +}; + + +/** +* Tests to see if a given list of settings matches a set of filters exactly. +* +* @param array $settings The settings to check. +* @return boolean If any filter returns null, false. If there are any settings +* left over after all filters are processed, false. Otherwise true. +*/ +exports.matchedSpecification = function (settings) { + var constraints = Array.prototype.slice.call(arguments, 1, arguments.length); + + for (var constraint in constraints) { + if (constraints.hasOwnProperty(constraint)) { + var remainingSettings = constraints[constraint](settings); + + if (!remainingSettings) { + return false; + } else { + settings = remainingSettings; + } + } + } + + return azureUtil.objectKeysLength(settings) === 0; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/services/storageserviceclient.js b/src/node_modules/azure-storage/lib/common/services/storageserviceclient.js new file mode 100644 index 0000000..3075c33 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/services/storageserviceclient.js @@ -0,0 +1,1369 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var request = require('../request-wrapper'); +var url = require('url'); +var qs = require('querystring'); +var util = require('util'); +var xml2js = require('xml2js'); +var events = require('events'); +var _ = require('underscore'); +var guid = require('uuid'); +var os = require('os'); +var extend = require('extend'); +var Parser = require('json-edm-parser'); + +var Md5Wrapper = require('../md5-wrapper'); +var azureutil = require('../util/util'); +var validate = require('../util/validate'); +var SR = require('../util/sr'); +var WebResource = require('../http/webresource'); +var BufferStream = require('../streams/bufferstream.js'); + +var ServiceSettings = require('./servicesettings'); +var StorageServiceSettings = require('./storageservicesettings'); +var Constants = require('../util/constants'); +var StorageUtilities = require('../util/storageutilities'); +var ServicePropertiesResult = require('../models/servicepropertiesresult'); +var TableUtilities = require('../../services/table/tableutilities'); + +var SharedKey = require('../signing/sharedkey'); +var SharedAccessSignature = require('../signing/sharedaccesssignature'); +var TokenSigner = require('../signing/tokensigner'); + +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; +var HttpResponseCodes = Constants.HttpConstants.HttpResponseCodes; +var StorageServiceClientConstants = Constants.StorageServiceClientConstants; +var defaultRequestLocationMode = Constants.RequestLocationMode.PRIMARY_ONLY; +var RequestLocationMode = Constants.RequestLocationMode; + +var Logger = require('../diagnostics/logger'); +var errors = require('../errors/errors'); +var ArgumentError = errors.ArgumentError; +var ArgumentNullError = errors.ArgumentNullError; +var TimeoutError = errors.TimeoutError; +var StorageError = errors.StorageError; + +/** +* Creates a new StorageServiceClient object. 
*
* @class
* The StorageServiceClient class is the base class of all the service classes.
* @constructor
* @param {string} storageAccount The storage account.
* @param {string} storageAccessKey The storage access key.
* @param {object} host The host for the service.
* @param {bool} usePathStyleUri Boolean value indicating whether to use path style uris.
* @param {string} sas The Shared Access Signature string.
* @param {TokenCredential} [token] The {@link TokenCredential} object, which can be created with an OAuth access token string.
*/
function StorageServiceClient(storageAccount, storageAccessKey, host, usePathStyleUri, sas, token) {
  StorageServiceClient['super_'].call(this);

  // Select the signing strategy from the supplied credentials, in priority
  // order: shared key > SAS token > OAuth token > anonymous (no-op signer).
  if(storageAccount && storageAccessKey) {
    // account and key
    this.storageAccount = storageAccount;
    this.storageAccessKey = storageAccessKey;
    this.storageCredentials = new SharedKey(this.storageAccount, this.storageAccessKey, usePathStyleUri);
  } else if (sas) {
    // sas
    this.sasToken = sas;
    this.storageCredentials = new SharedAccessSignature(sas);
  } else if (token) {
    // access token
    this.token = token;
    this.storageCredentials = new TokenSigner(token);
  } else {
    // anonymous
    this.anonymous = true;
    this.storageCredentials = {
      signRequest: function(webResource, callback){
        // no op, anonymous access
        callback(null);
      }
    };
  }

  if(host){
    this.setHost(host);
  }

  this.apiVersion = HeaderConstants.TARGET_STORAGE_VERSION;
  this.usePathStyleUri = usePathStyleUri;

  this._initDefaultFilter();

  /**
  * The logger of the service. To change the log level of the services, set the `[logger.level]{@link Logger#level}`.
  * @name StorageServiceClient#logger
  * @type Logger
  * */
  this.logger = new Logger(Logger.LogLevels.INFO);

  this._setDefaultProxy();

  this.xml2jsSettings = StorageServiceClient._getDefaultXml2jsSettings();
  this.defaultLocationMode = StorageUtilities.LocationMode.PRIMARY_ONLY;
}

util.inherits(StorageServiceClient, events.EventEmitter);

/**
* Gets the default xml2js settings.
* @ignore
* @return {object} The default settings
*/
StorageServiceClient._getDefaultXml2jsSettings = function() {
  var xml2jsSettings = _.clone(xml2js.defaults['0.2']);

  // these determine what happens if the xml contains attributes
  xml2jsSettings.attrkey = Constants.TableConstants.XML_METADATA_MARKER;
  xml2jsSettings.charkey = Constants.TableConstants.XML_VALUE_MARKER;

  // from xml2js guide: always put child nodes in an array if true; otherwise an array is created only if there is more than one.
  xml2jsSettings.explicitArray = false;

  return xml2jsSettings;
};

/**
* Sets a host for the service.
* @ignore
* @param {string} host The host for the service.
+*/ +StorageServiceClient.prototype.setHost = function (host) { + var parseHost = function(hostUri){ + var parsedHost; + if(!azureutil.objectIsNull(hostUri)) { + if(hostUri.indexOf('http') === -1 && hostUri.indexOf('//') !== 0){ + hostUri = '//' + hostUri; + } + parsedHost = url.parse(hostUri, false, true); + + if(!parsedHost.protocol){ + parsedHost.protocol = ServiceSettings.DEFAULT_PROTOCOL; + } + + if (!parsedHost.port) { + if (parsedHost.protocol === Constants.HTTPS) { + parsedHost.port = Constants.DEFAULT_HTTPS_PORT; + } else { + parsedHost.port = Constants.DEFAULT_HTTP_PORT; + } + } + + parsedHost = url.format({ + protocol: parsedHost.protocol, + port: parsedHost.port, + hostname: parsedHost.hostname, + pathname: parsedHost.pathname + }); + } + + return parsedHost; + }; + + validate.isValidHost(host); + + this.host = { + primaryHost: parseHost(host.primaryHost), + secondaryHost: parseHost(host.secondaryHost) + }; +}; + +/** +* Performs a REST service request through HTTP expecting an input stream. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {string} outputData The outgoing request data as a raw string. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype.performRequest = function (webResource, outputData, options, callback) { + this._performRequest(webResource, { outputData: outputData }, options, callback); +}; + +/** +* Performs a REST service request through HTTP expecting an input stream. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {Stream} outputStream The outgoing request data as a stream. 
+* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype.performRequestOutputStream = function (webResource, outputStream, options, callback) { + this._performRequest(webResource, { outputStream: outputStream }, options, callback); +}; + +/** +* Performs a REST service request through HTTP expecting an input stream. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {string} outputData The outgoing request data as a raw string. +* @param {Stream} inputStream The ingoing response data as a stream. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype.performRequestInputStream = function (webResource, outputData, inputStream, options, callback) { + this._performRequest(webResource, { outputData: outputData, inputStream: inputStream }, options, callback); +}; + +/** +* Performs a REST service request through HTTP. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {object} body The request body. +* @param {string} [body.outputData] The outgoing request data as a raw string. +* @param {Stream} [body.outputStream] The outgoing request data as a stream. +* @param {Stream} [body.inputStream] The ingoing response data as a stream. +* @param {object} [options] The request options. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype._performRequest = function (webResource, body, options, callback) { + var self = this; + + // Sets a requestId on the webResource + if(!options.clientRequestId) { + options.clientRequestId = guid.v1(); + } + + webResource.withHeader(HeaderConstants.CLIENT_REQUEST_ID, options.clientRequestId); + + // Sets the user-agent string if the process is not started by the browser + if(!process.browser) { + var userAgentComment = util.format('(NODE-VERSION %s; %s %s)', process.version, os.type(), os.release()); + webResource.withHeader(HeaderConstants.USER_AGENT, Constants.USER_AGENT_PRODUCT_NAME + '/' + Constants.USER_AGENT_PRODUCT_VERSION + ' ' + userAgentComment); + } + + // Initialize the location that the request is going to be sent to. + if(azureutil.objectIsNull(options.locationMode)) { + options.locationMode = this.defaultLocationMode; + } + + // Initialize the location that the request can be sent to. + if(azureutil.objectIsNull(options.requestLocationMode)) { + options.requestLocationMode = defaultRequestLocationMode; + } + + // Initialize whether nagling is used or not. + if(azureutil.objectIsNull(options.useNagleAlgorithm)) { + options.useNagleAlgorithm = this.useNagleAlgorithm; + } + + this._initializeLocation(options); + + // Initialize the operationExpiryTime + this._setOperationExpiryTime(options); + + // If the output stream already got sent to server and got error back, + // we should NOT retry within the SDK as the stream data is not valid anymore if we retry directly. 
+ // And it's very hard for SDK to re-wind the stream. + // + // If users want to retry on this kind of error, they can implement their own logic to parse the response and + // determine if they need to re-prepare a stream and call our SDK API to retry. + // + // Currently for blobs/files with size greater than 32MB (DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES), + // we'll send the steam by chunk buffers which doesn't have this issue. + var outputStreamSent = false; + + var operation = function (options, next) { + self._validateLocation(options); + var currentLocation = options.currentLocation; + self._buildRequestOptions(webResource, body, options, function (err, finalRequestOptions) { + if (err) { + callback({ error: err, response: null }, function (finalRequestOptions, finalCallback) { + finalCallback(finalRequestOptions); + }); + } else { + self.logger.log(Logger.LogLevels.DEBUG, 'FINAL REQUEST OPTIONS:\n' + util.inspect(finalRequestOptions)); + + if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) { + callback({ error: new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION), response: null }, function (finalRequestOptions, finalCallback) { + finalCallback(finalRequestOptions); + }); + } else { + var processResponseCallback = function (error, response) { + var responseObject; + + if (error) { + responseObject = { error: error, response: null }; + } else { + responseObject = self._processResponse(webResource, response, options); + responseObject.contentMD5 = response.contentMD5; + responseObject.length = response.length; + } + + responseObject.operationEndTime = new Date(); + // Required for listing operations to make sure successive operations go to the same location. 
+ responseObject.targetLocation = currentLocation; + responseObject.outputStreamSent = outputStreamSent; + + callback(responseObject, next); + }; + + var endResponse; + var buildRequest = function (headersOnly, inputStream) { + // Build request (if body was set before, request will process immediately, if not it'll wait for the piping to happen + var requestStream; + + var requestWithDefaults; + + if(self.proxy) { + if(requestWithDefaults === undefined) { + requestWithDefaults = request.defaults({'proxy':self.proxy}); + } + } else { + requestWithDefaults = request; + } + + if (headersOnly) { + requestStream = requestWithDefaults(finalRequestOptions); + + requestStream.on('error', processResponseCallback); + requestStream.on('response', function (response) { + var isValid = WebResource.validResponse(response.statusCode); + if (!isValid) { + // When getting invalid response, try to get the error message for future steps to extract the detailed error information + var contentLength = parseInt(response.headers['content-length']); + var errorMessageBuffer; + var index = 0; + if (contentLength !== undefined) { + errorMessageBuffer = Buffer.alloc(contentLength); + } + + requestStream.on('data', function (data) { + if (contentLength !== undefined) { + data.copy(errorMessageBuffer, index); + index += data.length; + } else { + if (!errorMessageBuffer) { + errorMessageBuffer = data; + } else { + errorMessageBuffer = Buffer.concat([errorMessageBuffer, data]); + } + } + }); + requestStream.on('end', function () { + if (errorMessageBuffer) { + // Strip the UTF8 BOM following the same ways as 'request' module + if (errorMessageBuffer.length > 3 && + errorMessageBuffer[0] === 239 && + errorMessageBuffer[1] === 187 && + errorMessageBuffer[2] === 191) { + response.body = errorMessageBuffer.toString('utf8', 3); + } else { + response.body = errorMessageBuffer.toString('utf8'); + } + } + processResponseCallback(null, response); + }); + } else { + // Only pipe to the destination stream 
when we get a valid response from service + // Error message should NOT be piped to the destination stream + if (inputStream) { + requestStream.pipe(inputStream); + } + + var responseLength = 0; + var internalHash = new Md5Wrapper().createMd5Hash(); + response.on('data', function(data) { + responseLength += data.length; + internalHash.update(data); + }); + + response.on('end', function () { + // Calculate and set MD5 here + if(azureutil.objectIsNull(options.disableContentMD5Validation) || options.disableContentMD5Validation === false) { + response.contentMD5 = internalHash.digest('base64'); + } + + response.length = responseLength; + endResponse = response; + }); + } + }); + } else { + requestStream = requestWithDefaults(finalRequestOptions, processResponseCallback); + } + + //If useNagleAlgorithm is not set or the value is set and is false, setNoDelay is set to true. + if (azureutil.objectIsNull(options.useNagleAlgorithm) || options.useNagleAlgorithm === false) { + requestStream.on('request', function(httpRequest) { + httpRequest.setNoDelay(true); + }); + } + + // Workaround to avoid request from potentially setting unwanted (rejected) headers by the service + var oldEnd = requestStream.end; + requestStream.end = function () { + if (finalRequestOptions.headers['content-length']) { + requestStream.headers['content-length'] = finalRequestOptions.headers['content-length']; + } else if (requestStream.headers['content-length']) { + delete requestStream.headers['content-length']; + } + + oldEnd.call(requestStream); + }; + + // Bubble events up -- This is when the request is going to be made. 
+ requestStream.on('response', function (response) { + self.emit('receivedResponseEvent', response); + }); + + return requestStream; + }; + + if (body && body.outputData) { + if (!azureutil.isBrowser() && Buffer.isBuffer(body.outputData)) { + // Request module will take 200MB additional memory when we pass a 100MB buffer as body + // Transfer buffer to stream will highly reduce the memory used by request module + finalRequestOptions.body = new BufferStream(body.outputData); + } else { + finalRequestOptions.body = body.outputData; + } + } + + // Pipe any input / output streams + if (body && body.inputStream) { + body.inputStream.on('close', function () { + if (endResponse) { + processResponseCallback(null, endResponse); + endResponse = null; + } + }); + body.inputStream.on('end', function () { + if (endResponse) { + processResponseCallback(null, endResponse); + endResponse = null; + } + }); + body.inputStream.on('finish', function () { + if (endResponse) { + processResponseCallback(null, endResponse); + endResponse = null; + } + }); + buildRequest(true, body.inputStream); + } else if (body && body.outputStream) { + var sendUnchunked = function () { + var size = finalRequestOptions.headers['content-length'] ? 
+ finalRequestOptions.headers['content-length'] : + Constants.BlobConstants.MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES; + + var concatBuf = Buffer.alloc(parseInt(size)); + var index = 0; + + body.outputStream.on('data', function (d) { + outputStreamSent = true; + if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) { + processResponseCallback(new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION)); + } else { + d.copy(concatBuf, index); + index += d.length; + } + }).on('end', function () { + var requestStream = buildRequest(); + requestStream.write(concatBuf); + requestStream.end(); + }); + if (azureutil.isStreamPaused(body.outputStream)) { + body.outputStream.resume(); + } + }; + + var sendStream = function () { + // NOTE: workaround for an unexpected EPIPE exception when piping streams larger than 29 MB + if (!azureutil.objectIsNull(finalRequestOptions.headers['content-length']) && finalRequestOptions.headers['content-length'] < 29 * 1024 * 1024) { + body.outputStream.pipe(buildRequest()); + outputStreamSent = true; + + if (azureutil.isStreamPaused(body.outputStream)) { + body.outputStream.resume(); + } + } else { + sendUnchunked(); + } + }; + + if (!body.outputStream.readable) { + // if the content length is zero, build the request and don't send a body + if (finalRequestOptions.headers['content-length'] === 0) { + buildRequest(); + } else { + // otherwise, wait until we know the readable stream is actually valid before piping + body.outputStream.on('open', function () { + sendStream(); + }); + } + } else { + sendStream(); + } + + // This catches any errors that happen while creating the readable stream (usually invalid names) + body.outputStream.on('error', function (error) { + processResponseCallback(error); + }); + } else { + buildRequest(); + } + } + } + }); + }; + + // The filter will do what it needs to the requestOptions and will provide a + // function to be handled after the reply + self.filter(options, function 
(postFiltersRequestOptions, nextPostCallback) { + if(self._maximumExecutionTimeExceeded(Date.now() + postFiltersRequestOptions.retryInterval, postFiltersRequestOptions.operationExpiryTime)) { + callback({ error: new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION), response: null}, function (postFiltersRequestOptions, finalCallback) { + finalCallback(postFiltersRequestOptions); + }); + } else { + // If there is a filter, flow is: + // filter -> operation -> process response + if(postFiltersRequestOptions.retryContext) { + var func = function() { + operation(postFiltersRequestOptions, nextPostCallback); + }; + + // Sleep for retryInterval before making the request + setTimeout(func, postFiltersRequestOptions.retryInterval); + } else { + // No retry policy filter specified + operation(postFiltersRequestOptions, nextPostCallback); + } + } + }); +}; + + +/** +* Builds the request options to be passed to the http.request method. +* @ignore +* @param {WebResource} webResource The webresource where to build the options from. +* @param {object} options The request options. +* @param {function(error, requestOptions)} callback The callback function. +*/ +StorageServiceClient.prototype._buildRequestOptions = function (webResource, body, options, callback) { + webResource.withHeader(HeaderConstants.STORAGE_VERSION, this.apiVersion); + webResource.withHeader(HeaderConstants.MS_DATE, new Date().toUTCString()); + if (!webResource.headers[HeaderConstants.ACCEPT]) { + webResource.withHeader(HeaderConstants.ACCEPT, 'application/atom+xml,application/xml'); + } + webResource.withHeader(HeaderConstants.ACCEPT_CHARSET, 'UTF-8'); + + // Browsers cache GET/HEAD requests by adding conditional headers such as 'IF_MODIFIED_SINCE' after Azure Storage 'Authorization header' calculation, + // which may result in a 403 authorization error. So add timestamp to GET/HEAD request URLs thus avoid the browser cache. 
+ if (azureutil.isBrowser() && ( + webResource.method === Constants.HttpConstants.HttpVerbs.GET || + webResource.method === Constants.HttpConstants.HttpVerbs.HEAD)) { + webResource.withQueryOption(HeaderConstants.FORCE_NO_CACHE_IN_BROWSER, new Date().getTime()); + } + + if(azureutil.objectIsNull(options.timeoutIntervalInMs)) { + options.timeoutIntervalInMs = this.defaultTimeoutIntervalInMs; + } + + if(azureutil.objectIsNull(options.clientRequestTimeoutInMs)) { + options.clientRequestTimeoutInMs = this.defaultClientRequestTimeoutInMs; + } + + if(!azureutil.objectIsNull(options.timeoutIntervalInMs) && options.timeoutIntervalInMs > 0) { + webResource.withQueryOption(QueryStringConstants.TIMEOUT, Math.ceil(options.timeoutIntervalInMs / 1000)); + } + + if(options.accessConditions) { + webResource.withHeader(HeaderConstants.IF_MATCH, options.accessConditions.EtagMatch); + webResource.withHeader(HeaderConstants.IF_MODIFIED_SINCE, options.accessConditions.DateModifedSince); + webResource.withHeader(HeaderConstants.IF_NONE_MATCH, options.accessConditions.EtagNonMatch); + webResource.withHeader(HeaderConstants.IF_UNMODIFIED_SINCE, options.accessConditions.DateUnModifiedSince); + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_EQUAL, options.accessConditions.SequenceNumberEqual); + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_LESS_THAN, options.accessConditions.SequenceNumberLessThan); + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_LESS_THAN_OR_EQUAL, options.accessConditions.SequenceNumberLessThanOrEqual); + webResource.withHeader(HeaderConstants.BLOB_CONDITION_MAX_SIZE, options.accessConditions.MaxBlobSize); + webResource.withHeader(HeaderConstants.BLOB_CONDITION_APPEND_POSITION, options.accessConditions.MaxAppendPosition); + } + + if(options.sourceAccessConditions) { + webResource.withHeader(HeaderConstants.SOURCE_IF_MATCH, options.sourceAccessConditions.EtagMatch); + webResource.withHeader(HeaderConstants.SOURCE_IF_MODIFIED_SINCE, 
options.sourceAccessConditions.DateModifedSince); + webResource.withHeader(HeaderConstants.SOURCE_IF_NONE_MATCH, options.sourceAccessConditions.EtagNonMatch); + webResource.withHeader(HeaderConstants.SOURCE_IF_UNMODIFIED_SINCE, options.sourceAccessConditions.DateUnModifiedSince); + } + + if (!webResource.headers || webResource.headers[HeaderConstants.CONTENT_TYPE] === undefined) { + // work around to add an empty content type header to prevent the request module from magically adding a content type. + webResource.headers[HeaderConstants.CONTENT_TYPE] = ''; + } else if (webResource.headers && webResource.headers[HeaderConstants.CONTENT_TYPE] === null) { + delete webResource.headers[HeaderConstants.CONTENT_TYPE]; + } + + if (!webResource.headers || webResource.headers[HeaderConstants.CONTENT_LENGTH] === undefined) { + if (body && body.outputData) { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body.outputData, 'UTF8')); + } else if (webResource.headers[HeaderConstants.CONTENT_LENGTH] === undefined) { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, 0); + } + } else if (webResource.headers && webResource.headers[HeaderConstants.CONTENT_LENGTH] === null) { + delete webResource.headers[HeaderConstants.CONTENT_LENGTH]; + } + + var enableGlobalHttpAgent = this.enableGlobalHttpAgent; + + // Sets the request url in the web resource. + this._setRequestUrl(webResource, options); + + this.emit('sendingRequestEvent', webResource); + + // Now that the web request is finalized, sign it + this.storageCredentials.signRequest(webResource, function (error) { + var requestOptions = null; + + if (!error) { + var targetUrl = webResource.uri; + + requestOptions = { + uri: url.format(targetUrl), + method: webResource.method, + headers: webResource.headers, + mode: 'disable-fetch' + }; + + if (options) { + //set encoding of response data. 
If set to null, the body is returned as a Buffer + requestOptions.encoding = options.responseEncoding; + } + + if (options && options.clientRequestTimeoutInMs) { + requestOptions.timeout = options.clientRequestTimeoutInMs; + } else { + requestOptions.timeout = Constants.DEFAULT_CLIENT_REQUEST_TIMEOUT_IN_MS; // 2 minutes + } + + // If global HTTP agent is not enabled, use forever agent. + if (enableGlobalHttpAgent !== true) { + requestOptions.forever = true; + } + } + + callback(error, requestOptions); + }); +}; + +/** +* Process the response. +* @ignore +* +* @param {WebResource} webResource The web resource that made the request. +* @param {Response} response The response object. +* @param {Options} options The response parsing options. +* @param {String} options.payloadFormat The payload format. +* @return The normalized responseObject. +*/ +StorageServiceClient.prototype._processResponse = function (webResource, response, options) { + var self = this; + + function convertRawHeadersToHeaders(rawHeaders) { + var headers = {}; + if(!rawHeaders) { + return undefined; + } + + for(var i = 0; i < rawHeaders.length; i++) { + var headerName; + if (rawHeaders[i].indexOf(HeaderConstants.PREFIX_FOR_STORAGE_METADATA) === 0) { + headerName = rawHeaders[i]; + } else { + headerName = rawHeaders[i].toLowerCase(); + } + headers[headerName] = rawHeaders[++i]; + } + + return headers; + } + + var validResponse = WebResource.validResponse(response.statusCode); + var rsp = StorageServiceClient._buildResponse(validResponse, response.body, convertRawHeadersToHeaders(response.rawHeaders) || response.headers, response.statusCode, response.md5); + var responseObject; + + if (validResponse && webResource.rawResponse) { + responseObject = { error: null, response: rsp }; + } else { + // attempt to parse the response body, errors will be returned in rsp.error without modifying the body + rsp = StorageServiceClient._parseResponse(rsp, self.xml2jsSettings, options); + + if (validResponse && 
!rsp.error) { + responseObject = { error: null, response: rsp }; + } else { + rsp.isSuccessful = false; + + if (response.statusCode < 400 || response.statusCode >= 500) { + this.logger.log(Logger.LogLevels.DEBUG, + 'ERROR code = ' + response.statusCode + ' :\n' + util.inspect(rsp.body)); + } + + // responseObject.error should contain normalized parser errors if they occured in _parseResponse + // responseObject.response.body should contain the raw response body in that case + var errorBody = rsp.body; + if(rsp.error) { + errorBody = rsp.error; + delete rsp.error; + } + + if (!errorBody) { + var code = Object.keys(HttpResponseCodes).filter(function (name) { + if (HttpResponseCodes[name] === rsp.statusCode) { + return name; + } + }); + + errorBody = { error: { code: code[0] } }; + } + + var normalizedError = StorageServiceClient._normalizeError(errorBody, response); + responseObject = { error: normalizedError, response: rsp }; + } + } + + this.logger.log(Logger.LogLevels.DEBUG, 'RESPONSE:\n' + util.inspect(responseObject)); + + return responseObject; +}; + +/** +* Associate a filtering operation with this StorageServiceClient. Filtering operations +* can include logging, automatically retrying, etc. Filter operations are objects +* that implement a method with the signature: +* +* "function handle (requestOptions, next)". +* +* After doing its preprocessing on the request options, the method needs to call +* "next" passing a callback with the following signature: +* signature: +* +* "function (returnObject, finalCallback, next)" +* +* In this callback, and after processing the returnObject (the response from the +* request to the server), the callback needs to either invoke next if it exists to +* continue processing other filters or simply invoke finalCallback otherwise to end +* up the service invocation. +* +* @param {Object} filter The new filter object. +* @return {StorageServiceClient} A new service client with the filter applied. 
*/
StorageServiceClient.prototype.withFilter = function (newFilter) {
  // Create a new object with the same members as the current service;
  // the original client keeps its own (unmerged) filter chain.
  var derived = _.clone(this);

  // If the current service has a filter, merge it with the new filter
  // (allowing us to effectively pipeline a series of filters).
  // Ordering is [f3 pre] -> [f2 pre] -> [f1 pre] -> operation -> [f1 post] -> [f2 post] -> [f3 post]
  var parentFilter = this.filter;
  var mergedFilter = newFilter;
  if (parentFilter !== undefined) {
    // The parentFilterNext is either the operation or the nextPipe function generated on a previous merge.
    mergedFilter = function (originalRequestOptions, parentFilterNext) {
      newFilter.handle(originalRequestOptions, function (postRequestOptions, newFilterCallback) {
        // Handle parent filter pre-processing and capture the parent filter post hook.
        var next = function (postPostRequestOptions, parentFilterCallback) {
          // The parentFilterNext is the filter next to the merged filter.
          // For 2 filters, that'd be the actual operation.
          parentFilterNext(postPostRequestOptions, function (responseObject, responseCallback, finalCallback) {
            parentFilterCallback(responseObject, finalCallback, function (postResponseObject) {
              newFilterCallback(postResponseObject, responseCallback, finalCallback);
            });
          });
        };

        parentFilter(postRequestOptions, next);
      });
    };
  }

  // Store the filter so it can be applied in performRequest
  derived.filter = mergedFilter;
  return derived;
};

/*
* Builds a response object with normalized key names.
* @ignore
*
* @param {Bool} isSuccessful Boolean value indicating if the request was successful
* @param {Object} body The response body.
* @param {Object} headers The response headers.
* @param {int} statusCode The response status code.
* @param {string} md5 The response's content md5 hash.
* @return {Object} A response object.
+*/ +StorageServiceClient._buildResponse = function (isSuccessful, body, headers, statusCode, md5) { + var response = { + isSuccessful: isSuccessful, + statusCode: statusCode, + body: body, + headers: headers, + md5: md5 + }; + + if (!azureutil.objectIsNull(headers)) { + if (headers[HeaderConstants.REQUEST_SERVER_ENCRYPTED] !== undefined) { + response.requestServerEncrypted = (headers[HeaderConstants.REQUEST_SERVER_ENCRYPTED] === 'true'); + } + } + + return response; +}; + +/** +* Parses a server response body from XML or JSON into a JS object. +* This is done using the xml2js library. +* @ignore +* +* @param {object} response The response object with a property "body" with a XML or JSON string content. +* @param {object} xml2jsSettings The XML to json settings. +* @param {Options} options The response parsing options. +* @param {String} options.payloadFormat The payload format. +* @return {object} The same response object with the body part as a JS object instead of a XML or JSON string. 
*/
StorageServiceClient._parseResponse = function (response, xml2jsSettings, options) {
  // xml2js invokes the callback synchronously for string input, so `parsed`
  // is populated (or a SyntaxError thrown) before parseXml returns.
  function parseXml(body) {
    var parsed;
    var parser = new xml2js.Parser(xml2jsSettings);
    parser.parseString(azureutil.removeBOM(body.toString()), function (err, parsedBody) {
      if (err) {
        var xmlError = new SyntaxError('EXMLFORMAT');
        xmlError.innerError = err;
        throw xmlError;
      } else { parsed = parsedBody; }
    });

    return parsed;
  }

  // Only attempt parsing when there is a non-empty body.
  if (response.body && Buffer.byteLength(response.body.toString()) > 0) {
    var contentType = '';
    if (response.headers && response.headers['content-type']) {
      contentType = response.headers['content-type'].toLowerCase();
    }

    try {
      if (contentType.indexOf('application/json') !== -1) {
        if (options && options.payloadFormat && options.payloadFormat !== TableUtilities.PayloadFormat.NO_METADATA) {
          // Streaming JSON parser: onValue fires for each completed value; the
          // outermost (last) value wins, leaving the whole document in body.
          var parser = new Parser();
          parser.onValue = function (value) {
            response.body = value;
          };
          parser.write(response.body);
        } else {
          response.body = JSON.parse(response.body);
        }
      } else if (contentType.indexOf('application/xml') !== -1 || contentType.indexOf('application/atom+xml') !== -1) {
        response.body = parseXml(response.body);
      } else if (contentType.indexOf('text/html') !== -1) {
        // HTML bodies are passed through untouched.
        response.body = response.body;
      } else {
        // Unknown or missing content type: assume XML rather than failing outright.
        response.body = parseXml(response.body);
        // throw new SyntaxError(SR.CONTENT_TYPE_MISSING, null);
      }
    } catch (e) {
      // Parse failures are reported via response.error; the raw body is preserved.
      response.error = e;
    }
  }

  return response;
};

/**
* Gets the storage settings.
*
* @param {string} [storageAccountOrConnectionString] The storage account or the connection string.
* @param {string} [storageAccessKey] The storage access key.
* @param {string} [host] The host address.
* @param {object} [sas] The Shared Access Signature string.
* @param {TokenCredential} [token] The {@link TokenCredential} object.
+* +* @return {StorageServiceSettings} +*/ +StorageServiceClient.getStorageSettings = function (storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) { + var storageServiceSettings; + if (storageAccountOrConnectionString && !storageAccessKey && !sas) { + // If storageAccountOrConnectionString was passed and no accessKey was passed, assume connection string + storageServiceSettings = StorageServiceSettings.createFromConnectionString(storageAccountOrConnectionString); + } else if ((storageAccountOrConnectionString && storageAccessKey) || sas || token || host) { + // Account and key or credentials or anonymous + storageServiceSettings = StorageServiceSettings.createExplicitly(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token); + } else { + // Use environment variables + storageServiceSettings = StorageServiceSettings.createFromEnvironment(); + } + + return storageServiceSettings; +}; + +/** +* Sets the webResource's requestUrl based on the service client settings. +* @ignore +* +* @param {WebResource} webResource The web resource where to set the request url. 
+*/ +StorageServiceClient.prototype._setRequestUrl = function (webResource, options) { + // Normalize the path + // Backup the original path of the webResource to make sure it works fine even this function get executed multiple times - like RetryFilter + webResource.originalPath = webResource.originalPath || webResource.path; + webResource.path = this._getPath(webResource.originalPath); + + if(!this.host){ + throw new ArgumentNullError('this.host', SR.STORAGE_HOST_LOCATION_REQUIRED); + } + + var host = this.host.primaryHost; + + if(!azureutil.objectIsNull(options) && options.currentLocation === Constants.StorageLocation.SECONDARY) { + host = this.host.secondaryHost; + } + + if(host && host.lastIndexOf('/') !== (host.length - 1)){ + host = host + '/'; + } + + var fullPath = url.format({pathname: webResource.path, query: webResource.queryString}); + webResource.uri = url.resolve(host, fullPath); + webResource.path = url.parse(webResource.uri).pathname; +}; + +/** +* Retrieves the normalized path to be used in a request. +* It also removes any leading "/" of the path in case +* it's there before. +* @ignore +* @param {string} path The path to be normalized. +* @return {string} The normalized path. 
+*/ +StorageServiceClient.prototype._getPath = function (path) { + if (path === null || path === undefined) { + path = ''; + } else if (path.indexOf('/') === 0) { + path = path.substring(1); + } + + return path; +}; + +/** + * Get the url of a given path + */ +StorageServiceClient.prototype._getUrl = function (path, sasToken, primary) { + var host; + if (!azureutil.objectIsNull(primary) && primary === false) { + host = this.host.secondaryHost; + } else { + host = this.host.primaryHost; + } + + host = azureutil.trimPortFromUri(host); + if(host && host.lastIndexOf('/') !== (host.length - 1)){ + host = host + '/'; + } + + var query = qs.parse(sasToken); + var fullPath = url.format({ pathname: this._getPath(path), query: query }); + return url.resolve(host, fullPath); +}; + +/** +* Initializes the default filter. +* This filter is responsible for chaining the pre filters request into the operation and, after processing the response, +* pass it to the post processing filters. This method should only be invoked by the StorageServiceClient constructor. +* @ignore +* +*/ +StorageServiceClient.prototype._initDefaultFilter = function () { + this.filter = function (requestOptions, nextPreCallback) { + if (nextPreCallback) { + // Handle the next pre callback and pass the function to be handled as post call back. + nextPreCallback(requestOptions, function (returnObject, finalCallback, nextPostCallback) { + if (nextPostCallback) { + nextPostCallback(returnObject); + } else if (finalCallback) { + finalCallback(returnObject); + } + }); + } + }; +}; + +/** +* Retrieves the metadata headers from the response headers. +* @ignore +* +* @param {object} headers The metadata headers. +* @return {object} An object with the metadata headers (without the "x-ms-" prefix). 
+*/ +StorageServiceClient.prototype.parseMetadataHeaders = function (headers) { + var metadata = {}; + + if (!headers) { + return metadata; + } + + for (var header in headers) { + if (header.indexOf(HeaderConstants.PREFIX_FOR_STORAGE_METADATA) === 0) { + var key = header.substr(HeaderConstants.PREFIX_FOR_STORAGE_METADATA.length, header.length - HeaderConstants.PREFIX_FOR_STORAGE_METADATA.length); + metadata[key] = headers[header]; + } + } + + return metadata; +}; + +/** +* Gets the properties of a storage account’s service, including Azure Storage Analytics. +* @ignore +* +* @this {StorageServiceClient} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the properties +* and `response` will contain information related to this operation. 
+*/ +StorageServiceClient.prototype.getAccountServiceProperties = function (optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getServiceProperties', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.servicePropertiesResult = null; + if (!responseObject.error) { + responseObject.servicePropertiesResult = ServicePropertiesResult.parse(responseObject.response.body.StorageServiceProperties); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.servicePropertiesResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the properties of a storage account’s service, including Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {StorageServiceClient} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. +*/ +StorageServiceClient.prototype.setAccountServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setServiceProperties', function (v) { + v.object(serviceProperties, 'serviceProperties'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var servicePropertiesXml = ServicePropertiesResult.serialize(serviceProperties); + + var webResource = WebResource.put() + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withQueryOption(QueryStringConstants.RESTYPE, 'service') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/xml;charset="utf-8"') + .withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(servicePropertiesXml)) + .withBody(servicePropertiesXml); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, 
webResource.body, options, processResponseCallback); +}; + +// Other functions + +/** +* Processes the error body into a normalized error object with all the properties lowercased. +* +* Error information may be returned by a service call with additional debugging information: +* http://msdn.microsoft.com/en-us/library/windowsazure/dd179382.aspx +* +* Table services returns these properties lowercased, example, "code" instead of "Code". So that the user +* can always expect the same format, this method lower cases everything. +* +* @ignore +* +* @param {Object} error The error object as returned by the service and parsed to JSON by the xml2json. +* @return {Object} The normalized error object with all properties lower cased. +*/ +StorageServiceClient._normalizeError = function (error, response) { + if (azureutil.objectIsString(error)) { + return new StorageError(error, null); + } else if (error) { + var normalizedError = {}; + + // blob/queue errors should have error.Error, table errors should have error['odata.error'] + var errorProperties = error.Error || error.error || error['odata.error'] || error['m:error'] || error; + normalizedError.code = errorProperties.message; // The message exists when there is error.Error. + + for (var property in errorProperties) { + if (errorProperties.hasOwnProperty(property)) { + var key = property.toLowerCase(); + if(key.indexOf('m:') === 0) { + key = key.substring(2); + } + + normalizedError[key] = errorProperties[property]; + + // if this is a table error, message is an object - flatten it to normalize with blob/queue errors + // ex: "message":{"lang":"en-US","value":"The specified resource does not exist."} becomes message: "The specified resource does not exist." 
+ if (key === 'message' && _.isObject(errorProperties[property])) { + if (errorProperties[property]['value']) { + normalizedError[key] = errorProperties[property]['value']; + } + } + } + } + + // add status code and server request id if available + if (response) { + if (response.statusCode) { + normalizedError.statusCode = response.statusCode; + } + + if (response.headers && response.headers['x-ms-request-id']) { + normalizedError.requestId = response.headers['x-ms-request-id']; + } + } + + var errorObject = new StorageError(normalizedError.code, normalizedError); + return errorObject; + } + + return null; +}; + +/** +* Sets proxy object specified by caller. +* +* @param {object} proxy proxy to use for tunneling +* { +* host: hostname +* port: port number +* proxyAuth: 'user:password' for basic auth +* headers: {...} headers for proxy server +* key: key for proxy server +* ca: ca for proxy server +* cert: cert for proxy server +* } +* if null or undefined, clears proxy +*/ +StorageServiceClient.prototype.setProxy = function (proxy) { + if (proxy) { + this.proxy = proxy; + } else { + this.proxy = null; + } +}; + +/** +* Sets the service host default proxy from the environment. +* Can be overridden by calling _setProxyUrl or _setProxy +* +*/ +StorageServiceClient.prototype._setDefaultProxy = function () { + var proxyUrl = StorageServiceClient._loadEnvironmentProxyValue(); + if (proxyUrl) { + var parsedUrl = url.parse(proxyUrl); + if (!parsedUrl.port) { + parsedUrl.port = 80; + } + this.setProxy(parsedUrl); + } else { + this.setProxy(null); + } +}; + +/* +* Loads the fields "useProxy" and respective protocol, port and url +* from the environment values HTTPS_PROXY and HTTP_PROXY +* in case those are set. 
+* @ignore +* +* @return {string} or null +*/ +StorageServiceClient._loadEnvironmentProxyValue = function () { + var proxyUrl = null; + if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY]; + } else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY.toLowerCase()]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY.toLowerCase()]; + } else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY]; + } else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY.toLowerCase()]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY.toLowerCase()]; + } + + return proxyUrl; +}; + +/** +* Initializes the location to which the operation is being sent to. +*/ +StorageServiceClient.prototype._initializeLocation = function (options) { + if(!azureutil.objectIsNull(options.locationMode)) { + switch(options.locationMode) { + case StorageUtilities.LocationMode.PRIMARY_ONLY: + case StorageUtilities.LocationMode.PRIMARY_THEN_SECONDARY: + options.currentLocation = Constants.StorageLocation.PRIMARY; + break; + case StorageUtilities.LocationMode.SECONDARY_ONLY: + case StorageUtilities.LocationMode.SECONDARY_THEN_PRIMARY: + options.currentLocation = Constants.StorageLocation.SECONDARY; + break; + default: + throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'locationMode', options.locationMode)); + } + } else { + options.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; + options.currentLocation = Constants.StorageLocation.PRIMARY; + } +}; + +/** +* Validates the location to which the operation is being sent to. 
+*/ +StorageServiceClient.prototype._validateLocation = function (options) { + if(this._invalidLocationMode(options.locationMode)) { + throw new ArgumentNullError('host', SR.STORAGE_HOST_MISSING_LOCATION); + } + + switch(options.requestLocationMode) { + case Constants.RequestLocationMode.PRIMARY_ONLY: + if(options.locationMode === StorageUtilities.LocationMode.SECONDARY_ONLY) { + throw new ArgumentError('host.primaryHost', SR.PRIMARY_ONLY_COMMAND); + } + + options.currentLocation = Constants.StorageLocation.PRIMARY; + options.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; + break; + + case Constants.RequestLocationMode.SECONDARY_ONLY: + if(options.locationMode === StorageUtilities.LocationMode.PRIMARY_ONLY) { + throw new ArgumentError('host.secondaryHost', SR.SECONDARY_ONLY_COMMAND); + } + + options.currentLocation = Constants.StorageLocation.SECONDARY; + options.locationMode = StorageUtilities.LocationMode.SECONDARY_ONLY; + break; + + default: + // no op + } +}; + +/** +* Checks whether we have the relevant host information based on the locationMode. +*/ +StorageServiceClient.prototype._invalidLocationMode = function (locationMode) { + switch(locationMode) { + case StorageUtilities.LocationMode.PRIMARY_ONLY: + return azureutil.objectIsNull(this.host.primaryHost); + case StorageUtilities.LocationMode.SECONDARY_ONLY: + return azureutil.objectIsNull(this.host.secondaryHost); + default: + return (azureutil.objectIsNull(this.host.primaryHost) || azureutil.objectIsNull(this.host.secondaryHost)); + } +}; + +/** +* Checks to see if the maximum execution timeout provided has been exceeded. +*/ +StorageServiceClient.prototype._maximumExecutionTimeExceeded = function (currentTime, expiryTime) { + if(!azureutil.objectIsNull(expiryTime) && currentTime > expiryTime) { + return true; + } else { + return false; + } +}; + +/** +* Sets the operation expiry time. 
*/
StorageServiceClient.prototype._setOperationExpiryTime = function (options) {
  // Compute the absolute wall-clock deadline once per operation; retries reuse it.
  // Explicit maximumExecutionTimeInMs wins over the client-level default.
  if(azureutil.objectIsNull(options.operationExpiryTime)) {
    if(!azureutil.objectIsNull(options.maximumExecutionTimeInMs)) {
      options.operationExpiryTime = Date.now() + options.maximumExecutionTimeInMs;
    } else if(this.defaultMaximumExecutionTimeInMs) {
      options.operationExpiryTime = Date.now() + this.defaultMaximumExecutionTimeInMs;
    }
  }
};

module.exports = StorageServiceClient;
diff --git a/src/node_modules/azure-storage/lib/common/services/storageservicesettings.js b/src/node_modules/azure-storage/lib/common/services/storageservicesettings.js
new file mode 100644
index 0000000..44b1c6a
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/services/storageservicesettings.js
@@ -0,0 +1,450 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +'use strict'; + +var _ = require('underscore'); +var url = require('url'); + +var util = require('../util/util'); +var ServiceSettings = require('./servicesettings'); +var Constants = require('../util/constants'); +var StorageServiceClientConstants = Constants.StorageServiceClientConstants; +var ConnectionStringKeys = Constants.ConnectionStringKeys; +var Validate = require('../util/validate'); +var SR = require('../util/sr'); +var TokenCredential = require('../models/tokencredential'); + +var useDevelopmentStorageSetting = ServiceSettings.setting(ConnectionStringKeys.USE_DEVELOPMENT_STORAGE_NAME, true); +var developmentStorageProxyUriSetting = ServiceSettings.settingWithFunc(ConnectionStringKeys.DEVELOPMENT_STORAGE_PROXY_URI_NAME, Validate.isValidUri); +var defaultEndpointsProtocolSetting = ServiceSettings.setting(ConnectionStringKeys.DEFAULT_ENDPOINTS_PROTOCOL_NAME, 'http', 'https'); +var accountNameSetting = ServiceSettings.setting(ConnectionStringKeys.ACCOUNT_NAME_NAME); +var accountKeySetting = ServiceSettings.settingWithFunc(ConnectionStringKeys.ACCOUNT_KEY_NAME, Validate.isBase64Encoded); +var sasSetting = ServiceSettings.settingWithFunc(ConnectionStringKeys.SHARED_ACCESS_SIGNATURE_NAME, _.isString); +var tokenSetting = ServiceSettings.settingWithFunc('token', function (object) {return object instanceof TokenCredential;}); + +var blobEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.BLOB_ENDPOINT_NAME, + Validate.isValidHost +); + +var queueEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.QUEUE_ENDPOINT_NAME, + Validate.isValidHost +); + +var tableEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.TABLE_ENDPOINT_NAME, + Validate.isValidHost +); + +var fileEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.FILE_ENDPOINT_NAME, + Validate.isValidHost +); + +var endpointSuffixSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.ENDPOINT_SUFFIX_NAME, 
/**
* Creates new storage service settings instance.
*
* @constructor
* @param {string} name The storage service (account) name.
* @param {string} key The storage service access key.
* @param {string} sasToken The shared access signature token; a leading '?' is stripped.
* @param {string} blobEndpoint The storage service blob endpoint.
* @param {string} queueEndpoint The storage service queue endpoint.
* @param {string} tableEndpoint The storage service table endpoint.
* @param {string} fileEndpoint The storage service file endpoint.
* @param {bool} usePathStyleUri Boolean value indicating whether to use path style uri or not.
* @param {TokenCredential} [token] The token credential object.
*/
function StorageServiceSettings(name, key, sasToken, blobEndpoint, queueEndpoint, tableEndpoint, fileEndpoint, usePathStyleUri, token) {
  this._name = name;
  this._key = key;

  // Store the SAS token as a bare query string: drop a single leading '?'.
  this._sasToken = (sasToken && sasToken[0] === '?') ? sasToken.slice(1) : sasToken;

  this._blobEndpoint = blobEndpoint;
  this._queueEndpoint = queueEndpoint;
  this._tableEndpoint = tableEndpoint;
  this._fileEndpoint = fileEndpoint;

  // Falsy values normalize to an explicit false; truthy values are kept as passed.
  this._usePathStyleUri = usePathStyleUri || false;

  this._token = token;
}
+* +* @param {string} connectionString The storage settings connection string. +* @return {StorageServiceSettings} +*/ +StorageServiceSettings.createFromConnectionString = function (connectionString) { + var tokenizedSettings = ServiceSettings.parseAndValidateKeys(connectionString, validKeys); + + try { + return StorageServiceSettings.createFromSettings(tokenizedSettings); + } catch (e) { + if (e instanceof ServiceSettings.NoMatchError) { + // Replace no match settings exception by no match connection string one. + ServiceSettings.noMatchConnectionString(connectionString); + } else { + throw e; + } + } +}; + +StorageServiceSettings.createExplicitly = function (storageAccount, storageAccessKey, host, sasToken, endpointSuffix, token) { + var settings = {}; + function addIfNotNullOrEmpty(key, value){ + if(typeof value === 'string' && !util.stringIsEmpty(value)){ + settings[key] = value; + } else if (typeof value == 'object' && !util.objectIsNull(value)) { + settings[key] = value; + } + } + + // Endpoints + if (host) { + addIfNotNullOrEmpty('blobendpoint', host); + addIfNotNullOrEmpty('tableendpoint', host); + addIfNotNullOrEmpty('queueendpoint', host); + addIfNotNullOrEmpty('fileendpoint', host); + } else { + addIfNotNullOrEmpty('defaultendpointsprotocol', ServiceSettings.DEFAULT_PROTOCOL.split(':', 1)[0]); + } + + addIfNotNullOrEmpty('accountname', storageAccount); + addIfNotNullOrEmpty('accountkey', storageAccessKey); + addIfNotNullOrEmpty('sharedaccesssignature', sasToken); + addIfNotNullOrEmpty('endpointsuffix', endpointSuffix); + addIfNotNullOrEmpty('token', token); + + return StorageServiceSettings.createFromSettings(settings); +}; + +StorageServiceSettings.createFromEnvironment = function () { + var emulated = process.env[StorageServiceClientConstants.EnvironmentVariables.EMULATED]; + if (emulated) { + return StorageServiceSettings.getDevelopmentStorageAccountSettings(); + } + + var connectionString = 
process.env[StorageServiceClientConstants.EnvironmentVariables.AZURE_STORAGE_CONNECTION_STRING]; + if (connectionString) { + return StorageServiceSettings.createFromConnectionString(connectionString); + } + + var storageAccount = process.env[StorageServiceClientConstants.EnvironmentVariables.AZURE_STORAGE_ACCOUNT]; + var storageAccessKey = process.env[StorageServiceClientConstants.EnvironmentVariables.AZURE_STORAGE_ACCESS_KEY]; + if(storageAccount && storageAccessKey){ + return StorageServiceSettings.createExplicitly(storageAccount, storageAccessKey, null, null, null); + } + + throw new Error(SR.NO_CREDENTIALS_PROVIDED); +}; + +/** +* Creates a StorageServiceSettings object from a set of settings. +* +* @param {object} settings The settings object. +* @return {StorageServiceSettings} +*/ +StorageServiceSettings.createFromSettings = function (settings) { + // Devstore case + var matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired(useDevelopmentStorageSetting), + ServiceSettings.optional(developmentStorageProxyUriSetting) + ); + + if (matchedSpecs) { + var proxyUri = util.tryGetValueInsensitive( + ConnectionStringKeys.DEVELOPMENT_STORAGE_PROXY_URI_NAME, + settings + ); + + return this.getDevelopmentStorageAccountSettings(proxyUri); + } + + // Account/Key automatic case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + defaultEndpointsProtocolSetting, + accountNameSetting, + accountKeySetting + ), + ServiceSettings.optional( + blobEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting, + endpointSuffixSetting + ) + ); + + if (matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // Account/Key explicit case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + accountNameSetting, + accountKeySetting + ), + ServiceSettings.atLeastOne( + blobEndpointSetting, + 
queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting, + endpointSuffixSetting + ) + ); + + if (matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // SAS case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + sasSetting + ), + ServiceSettings.atLeastOne( + blobEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting, + endpointSuffixSetting + ) + ); + + if(matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // anonymous explicit case + // Only blob anonymous access is valid. + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + blobEndpointSetting + ), + ServiceSettings.optional( + fileEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + endpointSuffixSetting + ) + ); + + if(matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // Token case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + tokenSetting + ), + ServiceSettings.atLeastOne( + blobEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting + ) + ); + + if(matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + ServiceSettings.noMatchSettings(settings); +}; + +/** +* Returns a StorageServiceSettings with development storage credentials using +* the specified proxy Uri. +* +* @param {string} proxyUri The proxy endpoint to use. 
+* @return {StorageServiceSettings} +*/ +StorageServiceSettings.getDevelopmentStorageAccountSettings = function (proxyUri) { + if (!proxyUri) { + proxyUri = StorageServiceClientConstants.DEV_STORE_URI; + } + + var parsedUri = url.parse(proxyUri); + var scheme = parsedUri.protocol; + var host = parsedUri.host; + var prefix = scheme + '//' + host; + + var blobEndpoint = { + primaryHost: prefix + ':10000' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + secondaryHost: prefix + ':10000' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT + '-secondary' + }; + + var queueEndpoint = { + primaryHost: prefix + ':10001' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + secondaryHost: prefix + ':10001' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT + '-secondary' + }; + + var tableEndpoint = { + primaryHost: prefix + ':10002' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + secondaryHost: prefix + ':10002' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT + '-secondary' + }; + + return new StorageServiceSettings( + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCESS_KEY, + null, + blobEndpoint, + queueEndpoint, + tableEndpoint, + null, + true + ); +}; + +/** +* Creates StorageServiceSettings object given endpoints uri. +* +* @ignore +* @param {array} settings The service settings. +* @param {string} blobEndpointUri The blob endpoint uri. +* @param {string} queueEndpointUri The queue endpoint uri. +* @param {string} tableEndpointUri The table endpoint uri. +* @param {string} fileEndpointUri The file endpoint uri. +* @return {StorageServiceSettings} +*/ +StorageServiceSettings._createStorageServiceSettings = function (settings) { + var standardizeHost = function (host, accountName, scheme, dns){ + var storageHost; + if (host) { + storageHost = {}; + storageHost.primaryHost = _.isString(host) ? 
host : host.primaryHost; + storageHost.secondaryHost = _.isString(host) ? undefined : host.secondaryHost; + } + + if (scheme && accountName && dns) { + storageHost = storageHost ? storageHost : {}; + storageHost.primaryHost = storageHost.primaryHost ? storageHost.primaryHost : url.format({ protocol: scheme, hostname: accountName + '.' + dns}); + storageHost.secondaryHost = storageHost.secondaryHost ? storageHost.secondaryHost : url.format({ protocol: scheme, hostname: accountName + '-secondary.' + dns}); + } + + return storageHost; + }; + + var scheme = util.tryGetValueInsensitive( + ConnectionStringKeys.DEFAULT_ENDPOINTS_PROTOCOL_NAME, + settings + ); + + var accountName = util.tryGetValueInsensitive( + ConnectionStringKeys.ACCOUNT_NAME_NAME, + settings + ); + + var accountKey = util.tryGetValueInsensitive( + ConnectionStringKeys.ACCOUNT_KEY_NAME, + settings + ); + + var sasToken = util.tryGetValueInsensitive( + ConnectionStringKeys.SHARED_ACCESS_SIGNATURE_NAME, + settings + ); + + var endpointSuffix = util.tryGetValueInsensitive( + ConnectionStringKeys.ENDPOINT_SUFFIX_NAME, + settings + ); + + var token = util.tryGetValueInsensitive( + 'token', + settings + ); + + var blobEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.BLOB_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'blob.' + endpointSuffix : StorageServiceClientConstants.CLOUD_BLOB_HOST); + + var queueEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.QUEUE_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'queue.' + endpointSuffix : StorageServiceClientConstants.CLOUD_QUEUE_HOST); + + var tableEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.TABLE_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'table.' 
/**
* Computes HMAC-SHA256 signatures for Azure Storage requests.
*
* @constructor
* @param {string} accessKey The Base64-encoded storage access key.
*/
function HmacSha256Sign(accessKey) {
  this._accessKey = accessKey;
  // The key travels Base64-encoded; HMAC needs the decoded raw bytes.
  this._decodedAccessKey = Buffer.from(this._accessKey, 'base64');
}

/**
* Computes a signature for the specified string using the HMAC-SHA256 algorithm:
* Signature = Base64(HMAC-SHA256(UTF8(stringToSign))).
*
* @param {string} stringToSign The UTF-8-encoded string to sign.
* @return {string} The Base64-encoded HMAC-SHA256 signature.
*/
HmacSha256Sign.prototype.sign = function (stringToSign) {
  var hmac = crypto.createHmac('sha256', this._decodedAccessKey);
  hmac.update(stringToSign, 'utf-8');
  return hmac.digest('base64');
};
/**
* Signs a request by appending the SAS token (and the service api-version)
* to the request URI as query parameters.
*
* @this {SharedAccessSignature}
* @param {WebResource} webResource The webresource to be signed.
* @param {function(error)} callback The callback function.
* @throws {SyntaxError} If the SAS token already carries an api-version parameter.
*/
SharedAccessSignature.prototype.signRequest = function (webResource, callback) {
  // Reject a token that embeds its own api-version BEFORE touching the URI,
  // so the webresource is not left half-signed when the error is thrown.
  // (Original checked after appending the token, and used loose '==').
  if (this.sasToken.indexOf('api-version') !== -1) {
    throw new SyntaxError(SR.INVALID_SAS_TOKEN);
  }

  // Append the token, starting a query string if the URI has none yet.
  webResource.uri += (webResource.uri.indexOf('?') === -1 ? '?' : '&') + this.sasToken;

  // Pin the target storage service api-version.
  webResource.uri += '&' + Constants.QueryStringConstants.API_VERSION + '=' + Constants.HeaderConstants.TARGET_STORAGE_VERSION;

  callback(null);
};
/**
* Creates a new SharedKey object, which signs requests using the storage
* account's Shared Key authorization scheme.
*
* @constructor
* @param {string} storageAccount The storage account name.
* @param {string} storageAccessKey The storage account's Base64-encoded access key.
* @param {bool} usePathStyleUri Boolean value indicating if the path, or the hostname, should include the storage account.
*/
function SharedKey(storageAccount, storageAccessKey, usePathStyleUri) {
  this.storageAccount = storageAccount;
  this.storageAccessKey = storageAccessKey;
  this.usePathStyleUri = usePathStyleUri;
  // All signing for this account goes through one HMAC-SHA256 signer
  // keyed by the account access key.
  this.signer = new HmacSha256Sign(storageAccessKey);
}
+* Refer to `Constants.AccountSasConstants.Permissions`. +* @param {date} sharedAccessPolicy.AccessPolicy.Start The time at which the Shared Access Signature becomes valid. +* @param {date} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired. +* @param {string} sharedAccessPolicy.AccessPolicy.IPAddressOrRange An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} sharedAccessPolicy.AccessPolicy.Protocols The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* Refer to `Constants.AccountSasConstants.Protocols`. +* @return {string} The shared access signature. +*/ +SharedKey.prototype.generateAccountSignedQueryString = function (sharedAccessPolicy) { + var addIfNotNull = function (queryString, name, value) { + if (!azureutil.objectIsNull(name) && !azureutil.objectIsNull(value)) { + queryString[name] = value; + } + }; + + var formatAccessPolicyDates = function (accessPolicy) { + if (!azureutil.objectIsNull(accessPolicy.Start)) { + if (!_.isDate(accessPolicy.Start)) { + accessPolicy.Start = new Date(accessPolicy.Start); + } + + accessPolicy.Start = azureutil.truncatedISO8061Date(accessPolicy.Start); + } + + if (!azureutil.objectIsNull(accessPolicy.Expiry)) { + if (!_.isDate(accessPolicy.Expiry)) { + accessPolicy.Expiry = new Date(accessPolicy.Expiry); + } + + accessPolicy.Expiry = azureutil.truncatedISO8061Date(accessPolicy.Expiry); + } + }; + + var queryString = {}; + + addIfNotNull(queryString, QueryStringConstants.SIGNED_VERSION, HeaderConstants.TARGET_STORAGE_VERSION); + + // add shared access policy params + if (sharedAccessPolicy.AccessPolicy) { + formatAccessPolicyDates(sharedAccessPolicy.AccessPolicy); + + addIfNotNull(queryString, QueryStringConstants.SIGNED_SERVICES, 
sharedAccessPolicy.AccessPolicy.Services); + addIfNotNull(queryString, QueryStringConstants.SIGNED_RESOURCE_TYPES, sharedAccessPolicy.AccessPolicy.ResourceTypes); + addIfNotNull(queryString, QueryStringConstants.SIGNED_PERMISSIONS, sharedAccessPolicy.AccessPolicy.Permissions); + addIfNotNull(queryString, QueryStringConstants.SIGNED_START, sharedAccessPolicy.AccessPolicy.Start); + addIfNotNull(queryString, QueryStringConstants.SIGNED_EXPIRY, sharedAccessPolicy.AccessPolicy.Expiry); + addIfNotNull(queryString, QueryStringConstants.SIGNED_IP, sharedAccessPolicy.AccessPolicy.IPAddressOrRange); + addIfNotNull(queryString, QueryStringConstants.SIGNED_PROTOCOL, sharedAccessPolicy.AccessPolicy.Protocols); + } + + // add signature + addIfNotNull(queryString, QueryStringConstants.SIGNATURE, this._generateAccountSharedAccessSignature(sharedAccessPolicy)); + + return qs.stringify(queryString); +}; + + +/** +* Generates the signature part of the shared access signature for a account. +* For more detailed information, refer to https://msdn.microsoft.com/en-us/library/azure/mt584140.aspx +* +* @param {object} sharedAccessPolicy The shared access policy. +* @param {SharedAccessServices} sharedAccessPolicy.AccessPolicy.Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.Services`. +* @param {SharedAccessResourceTypes} sharedAccessPolicy.AccessPolicy.ResourceTypes The resource type for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.ResourceTypes`. +* @param {SharedAccessPermissions} sharedAccessPolicy.AccessPolicy.Permissions The permissions for a shared access signature. +* Refer to `Constants.AccountSasConstants.Permissions`. +* @param {date} sharedAccessPolicy.AccessPolicy.Start The time at which the Shared Access Signature becomes valid. 
/**
* Generates the signature part of an account shared access signature.
* For details refer to https://msdn.microsoft.com/en-us/library/azure/mt584140.aspx
*
* The string-to-sign is the account name followed by the access-policy fields
* (Permissions, Services, ResourceTypes, Start, Expiry, IPAddressOrRange,
* Protocols) and the target storage version, each terminated by '\n'.
*
* @param {object} sharedAccessPolicy The shared access policy.
* @return {string} The signature part of the shared access signature.
*/
SharedKey.prototype._generateAccountSharedAccessSignature = function (sharedAccessPolicy) {
  var accessPolicy = sharedAccessPolicy.AccessPolicy;

  // Null/undefined fields sign as empty strings.
  var orBlank = function (value) {
    return azureutil.objectIsNull(value) ? '' : value;
  };

  var fields = [
    orBlank(this.storageAccount),
    accessPolicy ? orBlank(accessPolicy.Permissions) : '',
    accessPolicy ? orBlank(accessPolicy.Services) : '',
    accessPolicy ? orBlank(accessPolicy.ResourceTypes) : '',
    accessPolicy ? orBlank(accessPolicy.Start) : '',
    accessPolicy ? orBlank(accessPolicy.Expiry) : '',
    accessPolicy ? orBlank(accessPolicy.IPAddressOrRange) : '',
    accessPolicy ? orBlank(accessPolicy.Protocols) : '',
    orBlank(HeaderConstants.TARGET_STORAGE_VERSION)
  ];

  // Every field, including the last, carries a trailing '\n'.
  return this.signer.sign(fields.join('\n') + '\n');
};
/**
* Signs a request with the Authorization header (Shared Key scheme).
*
* @param {WebResource} webResource The webresource to be signed.
* @param {function(error)} callback The callback function.
*/
SharedKey.prototype.signRequest = function (webResource, callback) {
  // Standard headers contribute to the string-to-sign in this exact,
  // protocol-mandated order; each contributes its value (or a blank) plus '\n'.
  var signedHeaderNames = [
    HeaderConstants.CONTENT_ENCODING,
    HeaderConstants.CONTENT_LANGUAGE,
    HeaderConstants.CONTENT_LENGTH,
    HeaderConstants.CONTENT_MD5,
    HeaderConstants.CONTENT_TYPE,
    HeaderConstants.DATE,
    HeaderConstants.IF_MODIFIED_SINCE,
    HeaderConstants.IF_MATCH,
    HeaderConstants.IF_NONE_MATCH,
    HeaderConstants.IF_UNMODIFIED_SINCE,
    HeaderConstants.RANGE
  ];

  var headerLine = function (headers, headerName) {
    // Do not sign content-length 0 in 2014-08-16 and later.
    if (headerName === HeaderConstants.CONTENT_LENGTH && (azureutil.objectIsNull(headers[headerName]) || headers[headerName].toString() === '0')) {
      return '\n';
    }
    if (azureutil.objectIsNull(headers) || azureutil.objectIsNull(headers[headerName])) {
      return '\n';
    }
    return headers[headerName] + '\n';
  };

  var stringToSign = webResource.method + '\n';
  signedHeaderNames.forEach(function (headerName) {
    stringToSign += headerLine(webResource.headers, headerName);
  });
  stringToSign += this._getCanonicalizedHeaders(webResource) + this._getCanonicalizedResource(webResource);

  var signature = this.signer.sign(stringToSign);

  webResource.withHeader(HeaderConstants.AUTHORIZATION, 'SharedKey ' + this.storageAccount + ':' + signature);
  callback(null);
};
/*
* Retrieves the webresource's canonicalized resource string:
* '/<account><path>' followed by each raw query parameter, sorted by
* parameter name, as '\n<name>:<value>'.
*
* @param {WebResource} webResource The webresource to get the canonicalized resource string from.
* @return {string} The canonicalized resource string.
*/
SharedKey.prototype._getCanonicalizedResource = function (webResource) {
  // A missing path canonicalizes to the root resource '/'.
  var canonicalizedResource = '/' + this.storageAccount + (webResource.path ? webResource.path : '/');

  // Raw (unencoded) query string values, appended in lexicographic name order.
  var queryStringValues = webResource.queryString;
  if (queryStringValues) {
    Object.keys(queryStringValues).sort().forEach(function (paramName) {
      canonicalizedResource += '\n' + paramName + ':' + queryStringValues[paramName];
    });
  }

  return canonicalizedResource;
};
/*
* Constructs the Canonicalized Headers string: every x-ms-* header,
* lowercased, sorted lexicographically, emitted as '<name>:<value>\n'
* (blank value for null/empty/whitespace-only headers).
*
* @param {WebResource} webResource The webresource object.
* @return {string} The canonicalized headers.
*/
SharedKey.prototype._getCanonicalizedHeaders = function (webResource) {
  if (!webResource.headers) {
    return '';
  }

  // Remember the original casing so the value can still be looked up after
  // the name is lowercased for canonical ordering.
  var storageHeaders = [];
  for (var headerName in webResource.headers) {
    if (headerName.indexOf(HeaderConstants.PREFIX_FOR_STORAGE) === 0) {
      storageHeaders.push({ canonicalized: headerName.toLowerCase(), original: headerName });
    }
  }

  storageHeaders.sort(function (a, b) {
    return a.canonicalized.localeCompare(b.canonicalized);
  });

  var canonicalizedHeaders = '';
  storageHeaders.forEach(function (item) {
    var value = webResource.headers[item.original];
    if (azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(value)) {
      canonicalizedHeaders += item.canonicalized + ':\n';
    } else {
      canonicalizedHeaders += item.canonicalized + ':' + value + '\n';
    }
  });

  return canonicalizedHeaders;
};
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @param {string} sasVersion A string indicating the desired SAS Version to use, in storage service version format. Value must be 2012-02-12 or later. +* @param {ResourceTypes} [args.resourceType] The resource type, if the resource is a blob or container. Null if the resource is a queue or table. +* @param {ResourceTypes} [args.tableName] The table name, if the resource is a table. Null if the resource is a blob or queue. +* @param {ResourceTypes} [args.queryString] The query string, if additional parameters are desired. +* @param {object} [args.headers] The optional header values to set for a blob returned with this SAS. +* @param {string} [args.headers.CacheControl] The value of the Cache-Control response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentType] The value of the Content-Type response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentEncoding] The value of the Content-Encoding response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentLanguage] The value of the Content-Language response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentDisposition] The value of the Content-Disposition response header to be returned when this SAS is used. +* @return {object} The shared access signature query string. 
+*/
+SharedKey.prototype.generateSignedQueryString = function (serviceType, path, sharedAccessPolicy, sasVersion, args) {
+  // Helper: add a key/value pair to the query-string object only when both are non-null,
+  // so absent optional policy fields never appear in the SAS.
+  var addIfNotNull = function (queryString, name, value) {
+    if (!azureutil.objectIsNull(name) && !azureutil.objectIsNull(value)) {
+      queryString[name] = value;
+    }
+  };
+
+  // Helper: default to the library's target storage version when no SAS version is
+  // supplied; otherwise accept it only if it is one of the known-compatible versions.
+  var validateVersion = function (sasVersion) {
+    // validate and add version
+    if (azureutil.objectIsNull(sasVersion)) {
+      return HeaderConstants.TARGET_STORAGE_VERSION;
+    } else {
+      var values = _.values(CompatibleVersionConstants);
+      if (values.some(function(version) {
+        return version.toLowerCase() === sasVersion.toLowerCase();
+      })) {
+        return sasVersion;
+      } else {
+        throw new ArgumentError('sasVersion', azureutil.stringFormat(SR.INVALID_SAS_VERSION, sasVersion, values));
+      }
+    }
+  };
+
+  // Helper: normalize the access policy Start/Expiry values in place to truncated
+  // ISO-8601 strings, coercing non-Date inputs to Date first.
+  var formatAccessPolicyDates = function (accessPolicy) {
+    if (!azureutil.objectIsNull(accessPolicy.Start)) {
+      if (!_.isDate(accessPolicy.Start)) {
+        accessPolicy.Start = new Date(accessPolicy.Start);
+      }
+
+      accessPolicy.Start = azureutil.truncatedISO8061Date(accessPolicy.Start);
+    }
+
+    if (!azureutil.objectIsNull(accessPolicy.Expiry)) {
+      if (!_.isDate(accessPolicy.Expiry)) {
+        accessPolicy.Expiry = new Date(accessPolicy.Expiry);
+      }
+
+      accessPolicy.Expiry = azureutil.truncatedISO8061Date(accessPolicy.Expiry);
+    }
+  };
+
+  // set up optional args
+  var queryString;
+  var resourceType;
+  var headers;
+  var tableName;
+
+  if(args) {
+    queryString = args.queryString;
+    resourceType = args.resourceType;
+    tableName = args.tableName;
+    headers = args.headers;
+  }
+
+  if(!queryString) {
+    queryString = {};
+  }
+
+  // add shared access policy params
+  if (sharedAccessPolicy.AccessPolicy) {
+    formatAccessPolicyDates(sharedAccessPolicy.AccessPolicy);
+
+    addIfNotNull(queryString, QueryStringConstants.SIGNED_START, sharedAccessPolicy.AccessPolicy.Start);
+    addIfNotNull(queryString, QueryStringConstants.SIGNED_EXPIRY, sharedAccessPolicy.AccessPolicy.Expiry);
+    addIfNotNull(queryString, QueryStringConstants.SIGNED_PERMISSIONS, sharedAccessPolicy.AccessPolicy.Permissions);
+    addIfNotNull(queryString, QueryStringConstants.SIGNED_IP, sharedAccessPolicy.AccessPolicy.IPAddressOrRange);
+    addIfNotNull(queryString, QueryStringConstants.SIGNED_PROTOCOL, sharedAccessPolicy.AccessPolicy.Protocols);
+
+    // tables only
+    addIfNotNull(queryString, QueryStringConstants.STARTPK, sharedAccessPolicy.AccessPolicy.StartPk);
+    addIfNotNull(queryString, QueryStringConstants.ENDPK, sharedAccessPolicy.AccessPolicy.EndPk);
+    addIfNotNull(queryString, QueryStringConstants.STARTRK, sharedAccessPolicy.AccessPolicy.StartRk);
+    addIfNotNull(queryString, QueryStringConstants.ENDRK, sharedAccessPolicy.AccessPolicy.EndRk);
+  }
+
+  // validate and add version
+  var validatedSASVersionString = validateVersion(sasVersion);
+  addIfNotNull(queryString, QueryStringConstants.SIGNED_VERSION, validatedSASVersionString);
+
+  // add signed identifier
+  addIfNotNull(queryString, QueryStringConstants.SIGNED_IDENTIFIER, sharedAccessPolicy.Id);
+
+  // blobs only
+  addIfNotNull(queryString, QueryStringConstants.SIGNED_RESOURCE, resourceType);
+  if (headers) {
+    addIfNotNull(queryString, QueryStringConstants.CACHE_CONTROL, headers.cacheControl);
+    addIfNotNull(queryString, QueryStringConstants.CONTENT_TYPE, headers.contentType);
+    addIfNotNull(queryString, QueryStringConstants.CONTENT_ENCODING, headers.contentEncoding);
+    addIfNotNull(queryString, QueryStringConstants.CONTENT_LANGUAGE, headers.contentLanguage);
+    addIfNotNull(queryString, QueryStringConstants.CONTENT_DISPOSITION, headers.contentDisposition);
+  }
+
+  // tables only
+  addIfNotNull(queryString, QueryStringConstants.TABLENAME, tableName);
+
+  // add signature
+  // NOTE: the signature is computed over the same validated SAS version that was
+  // placed in the query string above, so the service can reproduce the signature.
+  addIfNotNull(queryString, QueryStringConstants.SIGNATURE, this._generateSignature(serviceType, path, sharedAccessPolicy, validatedSASVersionString, {resourceType: resourceType, headers: headers, tableName: tableName}));
+
+  return qs.stringify(queryString);
+};
+
+/**
+* 
Generates the shared access signature for a resource.
+*
+* @this {SharedAccessSignature}
+* @param {string} serviceType The service type.
+* @param {string} path The path to the resource.
+* @param {object} sharedAccessPolicy The shared access policy.
+* @param {string} [sharedAccessPolicy.Id] The signed identifier.
+* @param {SharedAccessPermissions} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type.
+* @param {date} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid.
+* @param {date} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired.
+* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive.
+* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS.
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http.
+* @param {string} sasVersion A string indicating the desired SAS version to use, in storage service version format. Value must be 2012-02-12 or later.
+* @param {ResourceTypes} [args.resourceType] The resource type, if the resource is a blob or container. Null if the resource is a queue or table.
+* @param {ResourceTypes} [args.tableName] The table name, if the resource is a table. Null if the resource is a blob or queue.
+* @param {object} [args.headers] The optional header values to set for a blob returned with this SAS.
+* @param {string} [args.headers.CacheControl] The value of the Cache-Control response header to be returned when this SAS is used.
+* @param {string} [args.headers.ContentType] The value of the Content-Type response header to be returned when this SAS is used.
+* @param {string} [args.headers.ContentEncoding] The value of the Content-Encoding response header to be returned when this SAS is used.
+* @param {string} [args.headers.ContentLanguage] The value of the Content-Language response header to be returned when this SAS is used.
+* @param {string} [args.headers.ContentDisposition] The value of the Content-Disposition response header to be returned when this SAS is used.
+* @return {string} The shared access signature.
+*/
+SharedKey.prototype._generateSignature = function (serviceType, path, sharedAccessPolicy, sasVersion, args) {
+  // Helper: render one string-to-sign field. Null/undefined become the empty string;
+  // every field is newline-terminated unless noNewLine is set (used for the final
+  // field of a section).
+  var getvalueToAppend = function (value, noNewLine) {
+    var returnValue = '';
+    if (!azureutil.objectIsNull(value)) {
+      returnValue = value;
+    }
+
+    if (noNewLine !== true) {
+      returnValue += '\n';
+    }
+
+    return returnValue;
+  };
+
+  // set up optional args
+  var resourceType;
+  var tableName;
+  var headers;
+  if(args) {
+    resourceType = args.resourceType;
+    tableName = args.tableName;
+    headers = args.headers;
+  }
+
+  // Add leading slash to path
+  if (path.substr(0, 1) !== '/') {
+    path = '/' + path;
+  }
+
+  // Canonicalized resource: the 2012-02-12 and 2013-08-15 versions omit the
+  // service-name prefix; later versions require it.
+  var canonicalizedResource;
+  if (sasVersion === CompatibleVersionConstants.FEBRUARY_2012 || sasVersion === CompatibleVersionConstants.AUGUST_2013) {
+    // Do not prepend service name for older versions
+    canonicalizedResource = '/' + this.storageAccount + path;
+  } else {
+    canonicalizedResource = '/' + serviceType + '/' + this.storageAccount + path;
+  }
+
+  // The field order below is mandated by the service's SAS specification; do not reorder.
+  var stringToSign = getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Permissions : '') +
+    getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Start : '') +
+    getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Expiry : '') +
+    getvalueToAppend(canonicalizedResource) +
+    getvalueToAppend(sharedAccessPolicy.Id) +
+    getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.IPAddressOrRange : '') +
+    getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Protocols : '') +
+    sasVersion;
+
+  if(sasVersion == CompatibleVersionConstants.FEBRUARY_2012) {
+    // The 2012-02-12 version does not support response-header overrides.
+    if(headers) {
+      throw new ArgumentError('args.headers', SR.INVALID_HEADERS);
+    }
+  } else if (resourceType) {
+    // blobs/containers only: response-header overrides participate in the signature
+    stringToSign += '\n' +
+      getvalueToAppend(headers ? headers.cacheControl : '') +
+      getvalueToAppend(headers ? headers.contentDisposition : '') +
+      getvalueToAppend(headers ? headers.contentEncoding : '') +
+      getvalueToAppend(headers ? headers.contentLanguage : '') +
+      getvalueToAppend(headers ? headers.contentType : '', true);
+  }
+
+  if(tableName) {
+    // tables only: partition/row key range bounds participate in the signature
+    stringToSign += '\n' +
+      getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.StartPk : '') +
+      getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.StartRk : '') +
+      getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.EndPk : '') +
+      getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.EndRk : '', true);
+  }
+
+  return this.signer.sign(stringToSign);
+};
+
+module.exports = SharedKey;
\ No newline at end of file
diff --git a/src/node_modules/azure-storage/lib/common/signing/tokensigner.js b/src/node_modules/azure-storage/lib/common/signing/tokensigner.js
new file mode 100644
index 0000000..ce71e69
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/signing/tokensigner.js
@@ -0,0 +1,41 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+var Constants = require('../util/constants');
+var HeaderConstants = Constants.HeaderConstants;
+
+/**
+* Creates a new TokenSigner object.
+*
+* @constructor
+* @param {TokenCredential} tokenCredential The token credential, such as containing an OAuth access token.
+*/
+function TokenSigner (tokenCredential) {
+  this.tokenCredential = tokenCredential;
+}
+
+/**
+* Signs a request with the Authentication header.
+*
+* The token is read from the credential on every call, so a token that was
+* refreshed on the credential is picked up automatically. The callback is
+* invoked synchronously with null because no failure path exists here.
+*
+* @param {WebResource} webResource The webresource to be signed.
+* @param {function(error)} callback The callback function.
+*/
+TokenSigner.prototype.signRequest = function (webResource, callback) {
+  // Bearer scheme: "Authorization: Bearer <token>"
+  webResource.withHeader(HeaderConstants.AUTHORIZATION, 'Bearer ' + this.tokenCredential.get());
+  callback(null);
+};
+
+module.exports = TokenSigner;
\ No newline at end of file
diff --git a/src/node_modules/azure-storage/lib/common/streams/batchoperation.js b/src/node_modules/azure-storage/lib/common/streams/batchoperation.js
new file mode 100644
index 0000000..1366b56
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/streams/batchoperation.js
@@ -0,0 +1,425 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+var util = require('util');
+var http = require('http');
+var https = require('https');
+var EventEmitter = require('events').EventEmitter;
+var os = require('os');
+
+var azureutil = require('../util/util');
+var Logger = require('../diagnostics/logger');
+var Constants = require('../util/constants');
+var errors = require('../errors/errors');
+var ArgumentError = errors.ArgumentError;
+
+var DEFAULT_OPERATION_MEMORY_USAGE = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES;
+var DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES = Constants.BlobConstants.DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES;
+var DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES = Constants.BlobConstants.DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES;
+var DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES = Constants.BlobConstants.DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES;
+var DEFAULT_GLOBAL_CONCURRENCY = 5; //Default http connection limitation for nodejs
+
+// Sampled once at module load; used by _isLowMemory below.
+var SystemTotalMemory = os.totalmem();
+var CriticalFreeMemory = 0.1 * SystemTotalMemory;
+var nodeVersion = azureutil.getNodeVersion();
+
+/**
+* Concurrently execute batch operations and call operation callback randomly or in sequence.
+* Random mode is for uploading.
+* 1. Fire user callback when the operation is done.
+* Sequence mode is for downloading.
+* 1. Fire user callback when the operation is done and all previous operations and callback has finished.
+* 2. BatchOperation guarantees the user callback is fired one by one.
+* 3. The next user callback can't be fired until the current one is completed.
+*/
+function BatchOperation(name, options) {
+  if (!options) {
+    options = {};
+  }
+
+  this.name = name;
+  this.logger = options.logger || new Logger(Logger.LogLevels.INFO);
+  this.operationMemoryUsage = options.operationMemoryUsage || DEFAULT_OPERATION_MEMORY_USAGE;
+  // callbackInOrder === sequence mode; anything but literal true means random mode.
+  this.callbackInOrder = options.callbackInOrder === true;
+  this.callInOrder = options.callInOrder === true;
+  // In sequence mode operation ids start at 1 and callbacks fire in id order.
+  this._currentOperationId = this.callbackInOrder ? 1 : -1;
+  this.concurrency = DEFAULT_GLOBAL_CONCURRENCY;
+  this.enableReuseSocket = (nodeVersion.major > 0 || nodeVersion.minor >= 10) && options.enableReuseSocket;
+
+  this._emitter = new EventEmitter();
+  this._enableComplete = false;
+  this._ended = false;
+  this._error = null;
+  this._paused = false;
+
+  //Total operations count(queued and active and connected)
+  this._totalOperation = 0;
+
+  //Active operations count(The operations which are connecting to remote or executing callback or queued for executing)
+  this._activeOperation = 0;
+
+  //Queued operations count(The operations which are connecting to remote or queued for executing)
+  this._queuedOperation = 0;
+
+  //finished operation should be removed from this array
+  this._operations = [];
+}
+
+/**
+* Operation state
+*/
+var OperationState = {
+  INITED : 'inited',
+  QUEUED : 'queued',
+  RUNNING : 'running',
+  COMPLETE : 'complete',
+  CALLBACK : 'callback',
+  ERROR : 'error'
+};
+
+BatchOperation.OperationState = OperationState;
+
+/**
+* Set batch operation concurrency
+* NOTE(review): this mutates the process-wide http(s).Agent.maxSockets, not just
+* this batch — confirm that global effect is intended.
+*/
+BatchOperation.prototype.setConcurrency = function(concurrency) {
+  if (concurrency) {
+    this.concurrency = concurrency;
+    http.Agent.maxSockets = this.concurrency;
+    https.Agent.maxSockets = this.concurrency;
+  }
+};
+
+/**
+* Is the workload heavy and it can be used to determine whether we could queue operations
+*/
+BatchOperation.prototype.IsWorkloadHeavy = function() {
+  //Only support one batch operation for now.
+  //In order to work with the multiple batch operation, we can use global operation track objects
+  //BatchOperation acquire a bunch of operation ids from global and allocated ids to RestOperation
+  //RestOperation start to run in order of id
+  var sharedRequest = 1;
+  if(this.enableReuseSocket && !this.callInOrder) {
+    sharedRequest = 2;
+  }
+  return this._activeOperation >= sharedRequest * this.concurrency || this._isLowMemory();
+};
+
+/**
+* Get the approximate memory usage for batch operation.
+*/
+BatchOperation.prototype._getApproximateMemoryUsage = function() {
+  var currentUsage = azureutil.isBrowser() ? DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES : process.memoryUsage().rss; // Currently, we cannot get memory usage in browsers
+  var futureUsage = this._queuedOperation * this.operationMemoryUsage;
+  return currentUsage + futureUsage;
+};
+
+/**
+* Return whether in a low memory situation.
+*/
+BatchOperation.prototype._isLowMemory = function() {
+  var approximateMemoryUsage = this._getApproximateMemoryUsage();
+  return os.freemem() < CriticalFreeMemory ||
+    (this._activeOperation >= this.concurrency && approximateMemoryUsage > 0.5 * SystemTotalMemory) ||
+    (azureutil.is32() && approximateMemoryUsage > DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES) ||
+    (azureutil.isBrowser() && approximateMemoryUsage > DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES);
+};
+
+/**
+* Add a operation into batch operation.
+* Returns true when the caller should stop adding (workload heavy) and wait for 'drain'.
+*/
+BatchOperation.prototype.addOperation = function(operation) {
+  this._operations.push(operation);
+  operation.status = OperationState.QUEUED;
+  operation.operationId = ++this._totalOperation;
+  this._queuedOperation++;
+  this.logger.debug(util.format('Add operation %d into batch operation %s. Active: %s; Queued: %s', operation.operationId, this.name, this._activeOperation, this._queuedOperation));
+  //Immediately start the idle operation if workload isn't heavy
+  this._runOperation(operation);
+  return this.IsWorkloadHeavy();
+};
+
+/**
+* Enable batch operation complete when there is no operation to run.
+*/
+BatchOperation.prototype.enableComplete = function() {
+  this._enableComplete = true;
+  this.logger.debug(util.format('Enable batch operation %s complete', this.name));
+  this._tryEmitEndEvent();
+};
+
+/**
+* Stop firing user call back
+*/
+BatchOperation.prototype.pause = function () {
+  this._paused = true;
+};
+
+/**
+* Start firing user call back
+*/
+BatchOperation.prototype.resume = function () {
+  if (this._paused) {
+    this._paused = false;
+    this._fireOperationUserCallback();
+  }
+};
+
+/**
+* Add event listener
+*/
+BatchOperation.prototype.on = function(event, listener) {
+  // only emit end if the batch has completed all operations;
+  // a listener registered after the batch already ended is invoked immediately.
+  if(this._ended && event === 'end') {
+    listener();
+  } else {
+    this._emitter.on(event, listener);
+  }
+};
+
+/**
+* Run operation
+*/
+BatchOperation.prototype._runOperation = function (operation) {
+  this.logger.debug(util.format('Operation %d start to run', operation.operationId));
+  var cb = this.getBatchOperationCallback(operation);
+
+  if(this._error) {
+    cb(this._error);//Directly call the callback with previous error.
+  } else {
+    operation.run(cb);
+  }
+
+  this._activeOperation++;
+};
+
+/**
+* Return an general operation call back.
+* This callback is used to update the internal status and fire user's callback when operation is complete.
+*/
+BatchOperation.prototype.getBatchOperationCallback = function (operation) {
+  var self = this;
+  return function (error) {
+    self._queuedOperation--;
+    if (error) {
+      operation.status = OperationState.ERROR;
+      self.logger.debug(util.format('Operation %d failed. Error %s', operation.operationId, error));
+      // First error wins; later operations are short-circuited with it in _runOperation.
+      self._error = error;
+    } else {
+      operation.status = OperationState.CALLBACK;
+      self.logger.debug(util.format('Operation %d succeed', operation.operationId));
+    }
+
+    operation._callbackArguments = arguments;
+    if (self._paused) {
+      operation.status = OperationState.CALLBACK;
+      self.logger.debug(util.format('Batch operation paused and Operation %d wait for firing callback', operation.operationId));
+    } else if (self.callbackInOrder) {
+      operation.status = OperationState.CALLBACK;
+      if (self._currentOperationId === operation.operationId) {
+        self._fireOperationUserCallback(operation);
+      } else if (self._currentOperationId > operation.operationId) {
+        throw new Error('Debug error: current callback operation id cannot be larger than operation id');
+      } else {
+        self.logger.debug(util.format('Operation %d is waiting for firing callback %s', operation.operationId, self._currentOperationId));
+      }
+    } else {
+      self._fireOperationUserCallback(operation);
+    }
+
+    self._tryEmitDrainEvent();
+    // Break reference cycles so finished operations can be garbage collected.
+    operation = null;
+    self = null;
+  };
+};
+
+/**
+* Fire user's call back
+*/
+BatchOperation.prototype._fireOperationUserCallback = function (operation) {
+  var index = this._getCallbackOperationIndex();
+  if (!operation && index != -1) {
+    operation = this._operations[index];
+  }
+
+  if (operation && !this._paused) {
+    // fire the callback, if exists
+    if (operation._userCallback) {
+      this.logger.debug(util.format('Fire user call back for operation %d', operation.operationId));
+      // make sure UserCallback is a sync operation in sequence mode.
+      // both async and sync operations are available for random mode.
+      operation._fireUserCallback();
+    }
+
+    // remove the operation from the array and decrement the counter
+    this._operations.splice(index, 1);
+    this._activeOperation--;
+    operation.status = OperationState.COMPLETE;
+    index = operation = null;
+
+    if (this.callbackInOrder) {
+      this._currentOperationId++;
+    }
+
+    // Recurse: the next operation in line may already be waiting to fire.
+    this._fireOperationUserCallback();
+  } else if (this._paused) {
+    this._tryEmitDrainEvent();
+  } else {
+    // check if batch has ended and if so emit end event
+    this._tryEmitEndEvent();
+  }
+};
+
+/**
+* Try to emit the BatchOperation end event
+* End event means all the operation and callback already finished.
+*/
+BatchOperation.prototype._tryEmitEndEvent = function () {
+  if(this._enableComplete && this._activeOperation === 0 && this._operations.length === 0) {
+    this._ended = true;
+    this.logger.debug(util.format('Batch operation %s emits the end event', this.name));
+    this._emitter.emit('end', this._error, null);
+    return true;
+  }
+
+  // Workaround to recover from the 'hang' edge case. _tryEmitEndEvent function is not supposed to be called if the batch is not really completed.
+  this._tryEmitDrainEvent();
+  return false;
+};
+
+/**
+* Try to emit the drain event
+*/
+BatchOperation.prototype._tryEmitDrainEvent = function () {
+  if (!this._emitter) return false;
+  if(!this.IsWorkloadHeavy() || this._activeOperation < this.concurrency) {
+    this._emitter.emit('drain');
+    return true;
+  }
+  return false;
+};
+
+/**
+* Get the current active operation index.
+* Only the active operation could call user's callback in sequence model.
+* The other finished but not active operations should wait for wake up.
+*/ +BatchOperation.prototype._getCallbackOperationIndex = function () { + var operation = null; + for (var i = 0; i < this._operations.length; i++) { + operation = this._operations[i]; + if (this.callbackInOrder) { + //Sequence mode + if (operation.operationId == this._currentOperationId) { + if (operation.status === OperationState.CALLBACK) { + return i; + } else { + return -1; + } + } + } else { + //Random mode + if (operation.status === OperationState.CALLBACK) { + return i; + } + } + } + return -1; +}; + +/** +* Do nothing and directly call the callback. +* In random mode, the user callback will be called immediately +* In sequence mode, the user callback will be called after the previous callback has been called +*/ +BatchOperation.noOperation = function (cb) { + cb(); +}; + +/** +* Rest operation in sdk +*/ +function RestOperation(serviceClient, operation) { + this.status = OperationState.Inited; + this.operationId = -1; + this._callbackArguments = null; + + // setup callback and args + this._userCallback = arguments[arguments.length - 1]; + var sliceEnd = arguments.length; + if(azureutil.objectIsFunction(this._userCallback)) { + sliceEnd--; + } else { + this._userCallback = null; + } + var operationArguments = Array.prototype.slice.call(arguments).slice(2, sliceEnd); + + this.run = function(cb) { + var func = serviceClient[operation]; + if(!func) { + throw new ArgumentError('operation', util.format('Unknown operation %s in serviceclient', operation)); + } else { + if(!cb) cb = this._userCallback; + operationArguments.push(cb); + this.status = OperationState.RUNNING; + func.apply(serviceClient, operationArguments); + operationArguments = operation = null; + } + }; + + this._fireUserCallback = function () { + if(this._userCallback) { + this._userCallback.apply(null, this._callbackArguments); + } + }; +} + +BatchOperation.RestOperation = RestOperation; + +/** +* Common operation wrapper +*/ +function CommonOperation(operationFunc, callback) { + this.status = 
OperationState.Inited; + this.operationId = -1; + this._callbackArguments = null; + var sliceStart = 2; + if (azureutil.objectIsFunction(callback)) { + this._userCallback = callback; + } else { + this._userCallback = null; + sliceStart = 1; + } + var operationArguments = Array.prototype.slice.call(arguments).slice(sliceStart); + this.run = function (cb) { + if (!cb) cb = this._userCallback; + operationArguments.push(cb); + this.status = OperationState.RUNNING; + operationFunc.apply(null, operationArguments); + operationArguments = operationFunc = null; + }; + + this._fireUserCallback = function () { + if (this._userCallback) { + this._userCallback.apply(null, this._callbackArguments); + } + this._userCallback = this._callbackArguments = null; + }; +} + +BatchOperation.CommonOperation = CommonOperation; + +module.exports = BatchOperation; diff --git a/src/node_modules/azure-storage/lib/common/streams/browserfilereadstream.js b/src/node_modules/azure-storage/lib/common/streams/browserfilereadstream.js new file mode 100644 index 0000000..0178820 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/streams/browserfilereadstream.js @@ -0,0 +1,61 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+
+var buffer = require('buffer').Buffer;
+var stream = require('stream');
+var util = require('util');
+
+var Constants = require('../util/constants');
+var bufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES;
+
+/**
+* Readable stream over an HTML File/Blob, for browser environments only
+* (relies on the global FileReader from the File API).
+* Reads the file in slices of `highWaterMark` bytes (default: the blob write
+* block size) and pushes each slice as a Buffer.
+*/
+function BrowserFileReadStream(file, options) {
+  stream.Readable.call(this, options);
+
+  if (!options) {
+    options = {};
+  }
+
+  // NOTE(review): per the File API, the FileReader constructor takes no
+  // arguments; the `file` argument here appears to be ignored — confirm intent.
+  this._fileReader = new FileReader(file); // HTML FileReader
+  this._file = file;
+  this._size = file.size;
+  this._highWaterMark = options.highWaterMark || bufferSize;
+  this._offset = 0;
+  var self = this;
+
+  // NOTE(review): loadend fires after both success and failure; on failure
+  // event.target.result would be null here — presumably the 'error' handler
+  // below aborts consumption first, but verify.
+  this._fileReader.onloadend = function (event) {
+    var data = event.target.result;
+    var buf = buffer.from(data);
+    self.push(buf);
+  };
+
+  this._fileReader.onerror = function (error) {
+    self.emit('error', error);
+  };
+}
+util.inherits(BrowserFileReadStream, stream.Readable);
+
+/**
+* stream.Readable hook: kick off an async read of the next slice, or signal
+* EOF by pushing null once the offset has passed the file size.
+*/
+BrowserFileReadStream.prototype._read = function() {
+  if (this._offset >= this._size) {
+    this.push(null);
+  } else {
+    var end = this._offset + this._highWaterMark;
+    var slice = this._file.slice(this._offset, end);
+    this._fileReader.readAsArrayBuffer(slice);
+    this._offset = end;
+  }
+};
+
+module.exports = BrowserFileReadStream;
\ No newline at end of file
diff --git a/src/node_modules/azure-storage/lib/common/streams/bufferstream.js b/src/node_modules/azure-storage/lib/common/streams/bufferstream.js
new file mode 100644
index 0000000..6bac838
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/streams/bufferstream.js
@@ -0,0 +1,50 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var stream = require('stream'); +var util = require('util'); + +function BufferStream(buffer, options) { + stream.Readable.call(this, options); + + this._buffer = buffer; + this._offset = 0; + this._chunkSize = 4 * 1024 * 1024; + this._bufferSize = buffer.length; +} + +util.inherits(BufferStream, stream.Readable); + +BufferStream.prototype._read = function () { + while (this.push(this._readNextChunk())) { + continue; + } +}; + +BufferStream.prototype._readNextChunk = function () { + var data = null; + + if (this._offset < this._bufferSize) { + var end = this._offset + this._chunkSize; + end = end > this._bufferSize ? this._bufferSize : end; + data = this._buffer.slice(this._offset, end); + this._offset = end; + } + + return data; +}; + +module.exports = BufferStream; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/streams/chunkallocator.js b/src/node_modules/azure-storage/lib/common/streams/chunkallocator.js new file mode 100644 index 0000000..2ecc2e1 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/streams/chunkallocator.js @@ -0,0 +1,128 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+/**
+* Chunked memory pool allocator.
+* It could dramatically reduce the memory usage.
+* However, it can't dramatically reduce the CPU time since GC in v8 is very efficient.
+*/
+function ChunkAllocator(chunkSize, maxCount) {
+  // Track the unused buffers and number of used buffers
+  this._pool = [];
+  this._inuse = 0;
+
+  // Buffer size
+  this._chunkSize = chunkSize;
+
+  // If total memory is larger than this._chunkSize * this._maxCount, the buffer pool is not used.
+  this._maxCount = maxCount || 10;
+
+  // Immediately add a buffer to the pool.
+  this._extendMemoryPool();
+}
+
+/**
+* Synchronously require a buffer.
+* Callers should be aware that a buffer reused from the pool still holds the
+* previous user's contents — the pool does not clear buffers on release, since
+* Buffer.fill is a time-consuming operation.
+*/
+ChunkAllocator.prototype.getBuffer = function(size) {
+  var buffer = this._getBufferFromPool(size);
+  if (buffer === null) {
+    // Either the total memory is larger than this._chunkSize * this._maxCount
+    // Or, the size does not match the chunk size of the pool
+    buffer = Buffer.alloc(size);
+  }
+
+  this._inuse++;
+  return buffer;
+};
+
+/**
+* Get buffer from the current memory pool.
+*/
+ChunkAllocator.prototype._getBufferFromPool = function(size) {
+  // Return null if the given size does not match the chunk size of the buffer pool.
+  if(size !== this._chunkSize) {
+    return null;
+  }
+
+  // Extend the memory pool if it is empty.
+  if(this._pool.length === 0) {
+    this._extendMemoryPool();
+  }
+
+  // If the pool is not empty, return a buffer.
+  if(this._pool.length !== 0) {
+    return this._pool.pop();
+  } else {
+    return null;
+  }
+};
+
+/**
+* Extend the memory pool if the maximum size has not been reached.
+*/
+ChunkAllocator.prototype._extendMemoryPool = function() {
+  var total = this._pool.length + this._inuse;
+
+  // If the total is larger than the max, do not allocate more memory.
+  if(total >= this._maxCount) return;
+
+  // Calculate the new number of buffers, equal to the total*2 bounded by 1 and the maxCount
+  var nextSize = Math.min(total * 2, this._maxCount) || 1;
+
+  // Add more buffers.
+  var increment = nextSize - total;
+  for(var i = 0; i < increment; i++) {
+    var buffer = Buffer.alloc(this._chunkSize);
+    this._pool.push(buffer);
+  }
+};
+
+/**
+* Release the buffer.
+*/
+ChunkAllocator.prototype.releaseBuffer = function(buffer) {
+  if(buffer.length !== this._chunkSize) {
+    // Directly delete the buffer if bufferSize is invalid and wait for GC.
+    buffer = null;
+    return;
+  }
+
+  // Add the buffer to the pool if it is not full, otherwise delete it
+  // NOTE: the buffer is returned to the pool without being cleared.
+  if (this._pool.length < this._maxCount) {
+    this._pool.push(buffer);
+  } else {
+    buffer = null;
+  }
+
+  // Decrement _inuse
+  this._inuse--;
+
+  // _inuse could be below zero if a buffer is released which was not returned by getBuffer
+  if(this._inuse < 0) {
+    this._inuse = 0;
+  }
+};
+
+/**
+* Destroy ChunkAllocator.
+*/
+ChunkAllocator.prototype.destroy = function() {
+  this._pool = [];
+  this._inuse = 0;
+};
+
+module.exports = ChunkAllocator;
diff --git a/src/node_modules/azure-storage/lib/common/streams/chunkstream.js b/src/node_modules/azure-storage/lib/common/streams/chunkstream.js
new file mode 100644
index 0000000..d92b7f4
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/streams/chunkstream.js
@@ -0,0 +1,321 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+var stream = require('stream');
+var util = require('util');
+
+var azureutil = require('../util/util');
+var Md5Wrapper = require('../md5-wrapper');
+var Constants = require('../util/constants');
+var bufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES;
+
+/**
+* Chunk stream
+* 1. Calculate md5
+* 2. Track reading offset
+* 3. Work with customize memory allocator
+* 4. Buffer data from stream.
+* @param {object} options stream.Readable options
+*/
+function ChunkStream(options) {
+  stream.Stream.call(this);
+  this.writable = this.readable = true;
+
+  if (!options) {
+    options = {};
+  }
+
+  // Chunks are emitted in slices of this many bytes.
+  this._highWaterMark = options.highWaterMark || bufferSize;
+
+  this._paused = undefined; //True/false is the external status from users.
+
+  this._isStreamOpened = false;
+  this._offset = 0;
+  this._allocator = options.allocator;
+  this._streamEnded = false;
+  this._md5hash = null;
+  this._buffer = null;
+  this._internalBufferSize = 0;
+  // 0 means "no limit"; see setOutputLength below.
+  this._outputLengthLimit = 0;
+  this._md5sum = undefined;
+
+  // MD5 is accumulated incrementally as chunks pass through, only when requested.
+  if (options.calcContentMd5) {
+    this._md5hash = new Md5Wrapper().createMd5Hash();
+  }
+}
+
+util.inherits(ChunkStream, stream.Stream);
+
+/**
+* Set the memory allocator.
+*/
+ChunkStream.prototype.setMemoryAllocator = function(allocator) {
+  this._allocator = allocator;
+};
+
+/**
+* Set the output length.
+*/ +ChunkStream.prototype.setOutputLength = function(length) { + if (length) { + this._outputLengthLimit = length; + } +}; + +/** +* Internal stream ended +*/ +ChunkStream.prototype.end = function (chunk, encoding, cb) { + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (chunk) { + this.write(chunk, encoding); + } + + this._streamEnded = true; + this._flushInternalBuffer(); + + if (cb) { + this.once('end', cb); + } + + this.emit('end'); +}; + +ChunkStream.prototype.finish = function () { + this.emit('finish'); + + this.destroy(); +}; + +ChunkStream.prototype.error = function () { + this.emit('error'); + + this.destroy(); +}; + +ChunkStream.prototype.destroy = function () { + this.writable = this.readable = false; + + if (this._allocator && azureutil.objectIsFunction(this._allocator.destroy)) { + this._allocator.destroy(); + } + + this.emit('close'); +}; + +ChunkStream.prototype.stop = function () { + this.destroy(); + this._streamEnded = true; + this.emit('end'); +}; + +/** +* Add event listener +*/ +ChunkStream.prototype.write = function (chunk, encoding) { + if (!this._isStreamOpened) { + this._isStreamOpened = true; + } + + this._buildChunk(chunk, encoding); + + return !this._paused; +}; + +/** +* Buffer the data into a chunk and emit it +*/ +ChunkStream.prototype._buildChunk = function (data) { + if (typeof data == 'string') { + data = Buffer.from(data); + } + var dataSize = data.length; + var dataOffset = 0; + do { + var buffer = null; + var targetSize = this._internalBufferSize + dataSize; + + if (targetSize < this._highWaterMark) { + // add the data to the internal buffer and return as it is not yet full + this._copyToInternalBuffer(data, dataOffset, data.length); + return; + } else if (targetSize == this._highWaterMark){ + var canReleaseInnerStreamBuffer = this._stream && this._stream._allocator && 
this._stream._allocator.releaseBuffer; + if(this._internalBufferSize === 0 && data.length === this._highWaterMark && !canReleaseInnerStreamBuffer) { + // set the buffer to the data passed in to avoid creating a new buffer + buffer = data; + } else { + // add the data to the internal buffer and pop that buffer + this._copyToInternalBuffer(data, dataOffset, data.length); + buffer = this._popInternalBuffer(); + } + dataSize = 0; + } else { + // add data to the internal buffer until its full, then return it + // set the dataSize parameter so that additional data is not lost + var copySize = this._highWaterMark - this._internalBufferSize; + this._copyToInternalBuffer(data, dataOffset, dataOffset + copySize); + dataSize -= copySize; + dataOffset += copySize; + buffer = this._popInternalBuffer(); + } + this._emitBufferData(buffer); + } while(dataSize > 0); +}; + +/** +* Emit the buffer +*/ +ChunkStream.prototype._emitBufferData = function(buffer) { + var newOffset = this._offset + buffer.length; + var range = { + start : this._offset, + end : newOffset - 1, + size : buffer.length + }; + + this._offset = newOffset; + + if (this._outputLengthLimit > 0) { + // When the start postion is larger than the limit, no data will be consumed though there is an event to be emitted. + // So the buffer should not be calculated. + if (range.start <= this._outputLengthLimit) { + if (this._offset > this._outputLengthLimit) { + // Don't use negative end parameter which means the index starting from the end of the buffer + // to be compatible with node 0.8. 
+ buffer = buffer.slice(0, buffer.length - (this._offset - this._outputLengthLimit)); + } + if (this._md5hash) { + this._md5hash.update(buffer); + } + } + } else if (this._md5hash) { + this._md5hash.update(buffer); + } + + this.emit('data', buffer, range); +}; + +/** +* Copy data into internal buffer +*/ +ChunkStream.prototype._copyToInternalBuffer = function(data, start, end) { + if(start === undefined) start = 0; + if(end === undefined) end = data.length; + if (!this._buffer) { + this._buffer = this._allocateNewBuffer(); + this._internalBufferSize = 0; + } + var copied = data.copy(this._buffer, this._internalBufferSize, start, end); + this._internalBufferSize += copied; + + if (this._stream && this._stream._allocator && this._stream._allocator.releaseBuffer) { + this._stream._allocator.releaseBuffer(data); + } + + if(copied != (end - start)) { + throw new Error('Can not copy entire data to buffer'); + } +}; + +/** +* Flush internal buffer +*/ +ChunkStream.prototype._flushInternalBuffer = function() { + var buffer = this._popInternalBuffer(); + if (buffer) { + this._emitBufferData(buffer); + } +}; + +/** +* Pop internal buffer +*/ +ChunkStream.prototype._popInternalBuffer = function () { + var buf = null; + if (!this._buffer || this._internalBufferSize === 0) { + buf = null; + } else if(this._internalBufferSize == this._highWaterMark) { + buf = this._buffer; + } else { + buf = this._buffer.slice(0, this._internalBufferSize); + } + + this._buffer = null; + this._internalBufferSize = 0; + + return buf; +}; + +/** +* Allocate a buffer +*/ +ChunkStream.prototype._allocateNewBuffer = function() { + var size = this._highWaterMark; + if(this._allocator && azureutil.objectIsFunction(this._allocator.getBuffer)) { + return this._allocator.getBuffer(size); + } else { + var buffer = Buffer.alloc(size); + return buffer; + } +}; + +/** +* Get file content md5 when read completely. 
+*/ +ChunkStream.prototype.getContentMd5 = function(encoding) { + if (!encoding) encoding = 'base64'; + if(!this._md5hash) { + throw new Error('Can\'t get content md5, please set the calcContentMd5 option for FileReadStream.'); + } else { + if (this._streamEnded) { + if (!this._md5sum) { + this._md5sum = this._md5hash.digest(encoding); + } + return this._md5sum; + } else { + throw new Error('Stream has not ended.'); + } + } +}; + +/** +* Pause chunk stream +*/ +ChunkStream.prototype.pause = function() { + this._paused = true; +}; + +/** +* Resume read stream +*/ +ChunkStream.prototype.resume = function() { + if (this._paused) { + this._paused = false; + + this.emit('drain'); + } +}; + +module.exports = ChunkStream; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/streams/chunkstreamwithstream.js b/src/node_modules/azure-storage/lib/common/streams/chunkstreamwithstream.js new file mode 100644 index 0000000..4c3d6b2 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/streams/chunkstreamwithstream.js @@ -0,0 +1,103 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var ChunkStream = require('./chunkstream'); +var EventEmitter = require('events').EventEmitter; +var util = require('util'); +var azureutil = require('./../util/util'); + +/** +* Chunk stream +* 1. Calculate md5 +* 2. Track reading offset +* 3. 
Work with customize memory allocator +* 4. Buffer data from stream. +* @param {object} options stream.Readable options +*/ +function ChunkStreamWithStream(stream, options) { + ChunkStream.call(this, options); + + stream.pause(); // Pause stream and wait for data listener. It's useful for node v0.8 + this._stream = stream; + this._stream.on('end', this.end.bind(this)); // Should catch the end event for node v0.8 +} + +util.inherits(ChunkStreamWithStream, ChunkStream); + +/** +* Add event listener +*/ +ChunkStreamWithStream.prototype.on = function(event, listener) { + if(event === 'end' && this._streamEnded) { + listener(); //Directly call the end event when stream already ended + } else { + EventEmitter.prototype.on.call(this, event, listener); + } + + if (event === 'data') { + if (!this._isStreamOpened) { + this._isStreamOpened = true; + this._stream.on('data', this._buildChunk.bind(this)); + } + if (this._paused === undefined) { + this._stream.resume(); + } + } + + return this; +}; + +/** +* Stop stream from external +*/ +ChunkStreamWithStream.prototype.stop = function (chunk, encoding, cb) { + if (azureutil.objectIsFunction(this._stream.destroy)) { + this._stream.destroy(); + } else { + this.pause(); + } + ChunkStream.prototype.end.call(this, chunk, encoding, cb); +}; + +/** +* Pause chunk stream +*/ +ChunkStreamWithStream.prototype.pause = function () { + ChunkStream.prototype.pause.call(this); + + this._stream.pause(); +}; + +/** +* Resume read stream +*/ +ChunkStreamWithStream.prototype.resume = function() { + ChunkStream.prototype.resume.call(this); + + this._stream.resume(); +}; + +ChunkStreamWithStream.prototype.finish = function () { + ChunkStream.prototype.finish.call(this); + this._stream.emit.call(this._stream, 'finish'); +}; + +ChunkStreamWithStream.prototype.destroy = function () { + ChunkStream.prototype.destroy.call(this); + this._stream.emit.call(this._stream, 'close'); +}; + +module.exports = ChunkStreamWithStream; diff --git 
a/src/node_modules/azure-storage/lib/common/streams/filereadstream.js b/src/node_modules/azure-storage/lib/common/streams/filereadstream.js new file mode 100644 index 0000000..368824f --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/streams/filereadstream.js @@ -0,0 +1,261 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var stream = require('stream'); +var util = require('util'); +var fs = require('fs'); +var validator = require('validator'); + +var Md5Wrapper = require('../md5-wrapper'); +var Constants = require('../util/constants'); +var bufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + +var EventEmitter = require('events').EventEmitter; + +/** +* File read stream +* 1. Calculate md5 +* 2. Track reading offset +* 3. Work with customize memory allocator +* 4. Buffer data from stream. 
+* @param {object} options stream.Readable options
+*/
+function FileReadStream(path, options) {
+  stream.Stream.call(this);
+  this.readable = true;
+
+  if(!options) {
+    options = {};
+  }
+
+  this._destroyed = false;
+  this._streamEnded = false;
+  this._fd = null;
+  // NOTE(review): _fileName is assigned twice (undefined here, then path below);
+  // redundant but harmless.
+  this._fileName = undefined;
+  this._highWaterMark = options.highWaterMark || bufferSize;
+  this._offset = 0;
+  this._paused = undefined;
+  this._allocator = options.allocator;
+  this._fileName = path;
+
+  this._md5hash = null;
+  this._md5sum = undefined;
+
+  if (options.calcContentMd5) {
+    this._md5hash = new Md5Wrapper().createMd5Hash();
+  }
+
+  // Opening is asynchronous; _emitData waits on the 'open' event until _fd is set.
+  this._open();
+}
+
+util.inherits(FileReadStream, stream.Stream);
+
+/**
+* Open file
+*/
+FileReadStream.prototype._open = function () {
+  var flags = 'r';
+  var self = this;
+  fs.open(this._fileName, flags, function(error, fd) {
+    if (error) {
+      self.emit('error', error);
+    } else {
+      self._fd = fd;
+      self.emit('open', fd);
+    }
+  });
+};
+
+/**
+* Add event listener
+* Attaching the first 'data' listener starts the read loop.
+*/
+FileReadStream.prototype.on = function(event, listener) {
+  if (event === 'data' && this._paused === undefined) {
+    this._paused = false;
+    this._emitData();
+  }
+
+  return EventEmitter.prototype.on.call(this, event, listener);
+};
+
+/**
+* Set memory allocator
+*/
+FileReadStream.prototype.setMemoryAllocator = function(allocator) {
+  this._allocator = allocator;
+};
+
+/**
+* Get buffer
+*/
+FileReadStream.prototype._getBuffer = function(size) {
+  if(this._allocator && this._allocator.getBuffer) {
+    return this._allocator.getBuffer(size);
+  } else {
+    var buffer = Buffer.alloc(size);
+    return buffer;
+  }
+};
+
+/**
+* Release buffer
+*/
+FileReadStream.prototype._releaseBuffer = function(buffer) {
+  if(this._allocator && this._allocator.releaseBuffer) {
+    this._allocator.releaseBuffer(buffer);
+  }
+};
+
+/**
+* Emit the data from file
+* Reads one _highWaterMark-sized chunk per call and re-schedules itself from the
+* fs.read callback until EOF, pause(), or destroy().
+*/
+FileReadStream.prototype._emitData = function() {
+  var self = this;
+  if(!this._fd) {
+    this.once('open', function() {
+      self._emitData();
+    });
+    return;
+  }
+
+  if (this._paused || this._streamEnded) {
+    return;
+  }
+  var buffer = this._getBuffer(this._highWaterMark);
+  fs.read(this._fd, buffer, 0, this._highWaterMark, this._offset, function(error, bytesRead, readBuffer) {
+    if (error) {
+      self.emit('error', error);
+      return;
+    }
+
+    if(bytesRead === 0) {
+      if(!self._streamEnded) {
+        self._streamEnded = true;
+        self.emit('end');
+      }
+      return;
+    }
+
+    var range = {
+      start : self._offset,
+      end : self._offset + bytesRead - 1,
+      size : bytesRead
+    };
+
+    var data;
+    if(bytesRead == self._highWaterMark) {
+      data = readBuffer;
+    } else {
+      data = readBuffer.slice(0, bytesRead);
+      //Release the current buffer since we created a new one
+      // NOTE(review): 'data' is a slice view over readBuffer, so returning
+      // readBuffer to the pool assumes consumers copy before the pool reuses it
+      // — TODO confirm against ChunkStream._copyToInternalBuffer.
+      self._releaseBuffer(readBuffer);
+    }
+
+    if(self._md5hash) {
+      self._md5hash.update(data);
+    }
+
+    self.emit('data', data, range);
+
+    // cleanup
+    self._offset += bytesRead;
+    buffer = readBuffer = data = null;
+    self._emitData();
+  });
+};
+
+/**
+* Get file content md5 when read completely.
+*/
+FileReadStream.prototype.getContentMd5 = function(encoding) {
+  if (!encoding) encoding = 'base64';
+  if(!this._md5hash) {
+    throw new Error('Can\'t get content md5, please set the calcContentMd5 option for FileReadStream.');
+  } else {
+    if (this._streamEnded) {
+      if (!this._md5sum) {
+        this._md5sum = this._md5hash.digest(encoding);
+      }
+      return this._md5sum;
+    } else {
+      throw new Error('FileReadStream has not ended.');
+    }
+  }
+};
+
+/**
+* Pause chunk stream
+*/
+FileReadStream.prototype.pause = function() {
+  this._paused = true;
+};
+
+/**
+* Resume read stream
+*/
+FileReadStream.prototype.resume = function() {
+  var previousState = this._paused;
+  if (this._paused) {
+    this._paused = false;
+
+    if(previousState === true) {
+      //Only start to emit data when it's in pause state
+      this._emitData();
+    }
+  }
+};
+
+FileReadStream.prototype.finish = function () {
+  this.destroy();
+};
+
+FileReadStream.prototype.destroy = function () {
+  if (this._destroyed) {
+    return;
+  }
+
+  var self = this;
+  this.readable = false;
+
+  // Close the descriptor and mark the stream dead; emits 'close' (or 'error').
+  function close(fd) {
+    fs.close(fd || self._fd, function(err) {
+      if (err) {
+        self.emit('error', err);
+      }
+      else {
+        self.emit('close');
+      }
+    });
+    self._fd = null;
+    self._destroyed = true;
+  }
+
+  // when the stream is closed immediately after creating it
+  if (!validator.isInt('' + this._fd)) {
+    this.once('open', close);
+    return;
+  }
+
+  close();
+};
+
+FileReadStream.prototype.stop = function () {
+  this.destroy();
+  this._streamEnded = true;
+  this.emit('end');
+};
+
+module.exports = FileReadStream;
diff --git a/src/node_modules/azure-storage/lib/common/streams/rangestream.js b/src/node_modules/azure-storage/lib/common/streams/rangestream.js
new file mode 100644
index 0000000..9043191
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/streams/rangestream.js
@@ -0,0 +1,293 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+var azureCommon = require('./../common.core');
+var azureutil = azureCommon.util;
+var Constants = require('./../util/constants');
+var EventEmitter = require('events').EventEmitter;
+
+/**
+* Range stream
+* Lists the valid ranges of a blob/file page by page and emits them as
+* normalized 'range' events (merged/split per _minRangeSize/_maxRangeSize).
+*/
+function RangeStream(serviceClient, container, blob, options) {
+  this.serviceClient = serviceClient;
+  this._emitter = new EventEmitter();
+  this._paused = false;
+  this._emittedAll = false;
+  this._emittedRangeIndex = 0;
+  this._rangelist = [];
+  this._resourcePath = [];
+  this._isEmitting = false;
+  this._rangeStreamEnded = false;
+  this._lengthHeader = Constants.HeaderConstants.CONTENT_LENGTH;
+  this._minRangeSize = Constants.BlobConstants.MIN_WRITE_PAGE_SIZE_IN_BYTES;
+  this._maxRangeSize = Constants.BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES;
+  if (options.rangeStart) {
+    this._startOffset = options.rangeStart;
+  } else {
+    this._startOffset = 0;
+  }
+  this._dataOffset = this._startOffset;
+  if (options.rangeEnd) {
+    this._endOffset = options.rangeEnd;
+  } else {
+    this._endOffset = Number.MAX_VALUE;
+  }
+  if (container) {
+    this._resourcePath.push(container);
+  }
+  if (blob) {
+    this._resourcePath.push(blob);
+  }
+}
+
+/**
+* Get range list
+* Recurses (via process.nextTick) one MAX_SINGLE_GET_PAGE_RANGE_SIZE window at a
+* time until the requested range is exhausted.
+*/
+RangeStream.prototype.list = function (options, callback) {
+  var start = this._startOffset;
+  var end;
+  var singleRangeSize = Constants.BlobConstants.MAX_SINGLE_GET_PAGE_RANGE_SIZE;
+
+  if (this._listFunc === undefined) {
+    // the default function puts the whole blob into a single list item
+    this._listFunc = this._defaultListFunc;
+    end = this._endOffset;
+  } else {
+    end = Math.min(this._startOffset + singleRangeSize - 1, this._endOffset);
+  }
+  options.rangeStart = start;
+  if (end != Number.MAX_VALUE) {
+    options.rangeEnd = end;
+  }
+
+  var self = this;
+  var onList = function (error, ranges, response) {
+    if (error) {
+      callback(error);
+    } else {
+      if (self._rangeStreamEnded) {
+        return;
+      }
+
+      var totalSize = parseInt(response.headers[self._lengthHeader], 10);
+      var endOffset = Math.min(totalSize - 1, self._endOffset);
+      var rangeEnd = Math.min(end, endOffset);
+
+      if (!ranges.length) {
+        // convert single object to range
+        // start >= end means there is no valid regions
+        ranges.push({ start : start, end : rangeEnd, dataSize: 0 });
+      } else if (ranges[ranges.length - 1].end !== rangeEnd) {
+        // don't forget the zero chunk at the end of range
+        ranges.push({ start : ranges[ranges.length - 1].end + 1, end : rangeEnd, dataSize: 0 });
+      }
+
+      if (end >= endOffset) {
+        self._rangeStreamEnded = true;
+      }
+      self.resizeAndSaveRanges(ranges);
+      self._startOffset += singleRangeSize;
+      self._emitRange();
+
+      // This is only valid when listing pages because when listing with the default function, the "endOffset" will always equal to or greater than the "end".
+      if (end < endOffset && !self._rangeStreamEnded) {
+        process.nextTick(function () {
+          ranges = null;
+          self.list(options, callback);
+          self = null;
+        });
+      }
+    }
+  };
+
+  var callArguments = Array.prototype.slice.call(this._resourcePath);
+  callArguments.push(options);
+  callArguments.push(onList);
+  this._listFunc.apply(this.serviceClient, callArguments);
+};
+
+/**
+* Resize regions:
+* 1. Merge small pieces into a range no less than this._minRangeSize
+* 2. Split large pieces into ranges no more than this._maxRangeSize
+*/
+RangeStream.prototype.resizeAndSaveRanges = function (ranges) {
+  var rangeList = this._rangelist;
+  var holdingRange = { type : 'range', size : 0, dataSize : 0, start : this._startOffset, end : -1 };
+  var readingRange = null;
+  var rangeSize = 0;
+
+  for (var index = 0; index < ranges.length; index++) {
+    readingRange = ranges[index];
+    rangeSize = readingRange.end - holdingRange.start + 1;
+
+    if (rangeSize < this._minRangeSize) {
+      // merge fragment ranges
+      this.mergeRanges(holdingRange, readingRange);
+    } else {
+      if (holdingRange.end != -1) {
+        // save the holding range list and hold the reading range
+        this.splitAndSaveRanges(holdingRange, rangeList);
+        holdingRange = readingRange;
+      }
+
+      if (this._dataOffset != readingRange.start) {
+        // padding zero for empty range and hold the reading range
+        this.putZeroRange(this._dataOffset, readingRange.start - 1, rangeList);
+        holdingRange = readingRange;
+      } else if (holdingRange.end == -1) {
+        // if holdingRange is never set, it means readingRange exceeds MIN_WRITE_FILE_SIZE_IN_BYTES
+        this.splitAndSaveRanges(readingRange, rangeList);
+        // reading range has been saved, offset the holding start position for calculating the range size in next loop
+        holdingRange.start = readingRange.end + 1;
+      }
+    }
+
+    // If it is the last range, put the holding range into list anyway
+    if (index == ranges.length - 1 && holdingRange.end > holdingRange.start) {
+      this.splitAndSaveRanges(holdingRange, rangeList);
+    }
+
+    this._dataOffset = readingRange.end + 1;
+  }
+};
+
+/**
+* Put a zero range into range list
+* size -1 / dataSize 0 marks a gap that holds no stored data.
+*/
+RangeStream.prototype.putZeroRange = function (startOffset, endOffset, rangeList) {
+  var zeroDataRange = { type : 'range', size : -1, dataSize : 0, start : startOffset, end : endOffset };
+  this.splitAndSaveRanges(zeroDataRange, rangeList);
+};
+
+/**
+* Merge small ranges
+*/
+RangeStream.prototype.mergeRanges = function (holdingRange, readingRange) {
+  holdingRange.size = readingRange.end - holdingRange.start + 1;
+  holdingRange.dataSize += readingRange.dataSize;
+  holdingRange.end = readingRange.end;
+  return holdingRange;
+};
+
+/**
+* Split range into small pieces with maximum _maxRangeSize and minimum _minRangeSize size.
+* For example, [0, 10G - 1] => [0, 4MB - 1], [4MB, 8MB - 1] ... [10GB - 4MB, 10GB - 1]
+*/
+RangeStream.prototype.splitAndSaveRanges = function (range, rangeList) {
+  var rangeSize = range.end - range.start + 1;
+  var offset = range.start;
+  var limitedSize = 0;
+
+  while (rangeSize > 0) {
+    var newRange = { type : 'range', size : 0, dataSize : 0, start : -1, end : -1 };
+    limitedSize = Math.min(rangeSize, this._maxRangeSize);
+    newRange.start = offset;
+    newRange.size = limitedSize;
+    if (range.dataSize === 0) {
+      newRange.dataSize = 0;
+    } else {
+      newRange.dataSize = limitedSize;
+    }
+    offset += limitedSize;
+    newRange.end = offset - 1;
+    rangeList.push(newRange);
+    rangeSize -= limitedSize;
+  }
+};
+
+/**
+* Emit a range
+* _isEmitting guards against re-entrancy; emitted slots are nulled so the
+* backing array does not pin consumed ranges in memory.
+*/
+RangeStream.prototype._emitRange = function () {
+  if (this._paused || this._emittedAll || this._isEmitting) return;
+  this._isEmitting = true;
+  try {
+    for (; this._emittedRangeIndex < this._rangelist.length; this._emittedRangeIndex++) {
+      if (this._paused) {
+        return;
+      }
+      var range = this._rangelist[this._emittedRangeIndex];
+      this._emitter.emit('range', range);
+      this._rangelist[this._emittedRangeIndex] = null;
+    }
+
+    if (this._rangeStreamEnded) {
+      this._rangelist = null;
+      this._emittedAll = true;
+      this._emitter.emit('end');
+    }
+  } finally {
+    this._isEmitting = false;
+  }
+};
+
+/**
+* The Default list function which puts the whole blob into one range.
+*/
+RangeStream.prototype._defaultListFunc = function (container, blob, optionsOrCallback, callback) {
+  var options;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; });
+
+  this.getBlobProperties(container, blob, options, function (error, result, response) {
+    if (error) {
+      callback(error);
+    } else {
+      var range = [{}];
+      range[0].start = options.rangeStart ? Math.max(options.rangeStart, 0) : 0;
+      range[0].end = options.rangeEnd ? Math.min(options.rangeEnd, result.contentLength - 1) : result.contentLength - 1;
+      range[0].size = range[0].end - range[0].start + 1;
+      range[0].dataSize = range[0].size;
+      callback(error, range, response);
+    }
+  });
+};
+
+/**
+* Add event listener
+*/
+RangeStream.prototype.on = function (event, listener) {
+  this._emitter.on(event, listener);
+};
+
+/**
+* Pause the stream
+*/
+RangeStream.prototype.pause = function () {
+  this._paused = true;
+};
+
+/**
+* Resume the stream
+*/
+RangeStream.prototype.resume = function () {
+  this._paused = false;
+  if (!this._isEmitting) {
+    this._emitRange();
+  }
+};
+
+/**
+* Stop the stream
+*/
+RangeStream.prototype.stop = function () {
+  this.pause();
+  this._emittedAll = true;
+  this._emitter.emit('end');
+};
+
+module.exports = RangeStream;
diff --git a/src/node_modules/azure-storage/lib/common/streams/readablefs.js b/src/node_modules/azure-storage/lib/common/streams/readablefs.js
new file mode 100644
index 0000000..9601935
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/streams/readablefs.js
@@ -0,0 +1,32 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+var rs = require('readable-stream').Readable;
+var fs = require('fs');
+
+/*
+* As far as streams support goes, we can get the node 0.10 stream API in node 0.8.
+* Use the readable-stream module (https://www.npmjs.org/package/readable-stream) which is
+* essentially a copy of the stream modules from core node 0.10 and it just works on both 0.8 and 0.10.
+*/
+
+// Create a read stream for the given path; only on node 0.8.x is the raw
+// fs stream wrapped to present the streams2 (node 0.10) API.
+exports.createReadStream = function(path, options) {
+  var stream = fs.createReadStream(path, options);
+  if (/^v0\.8\./.test(process.version)) {
+    stream = rs().wrap(stream);
+  }
+  return stream;
+};
\ No newline at end of file
diff --git a/src/node_modules/azure-storage/lib/common/streams/speedsummary.js b/src/node_modules/azure-storage/lib/common/streams/speedsummary.js
new file mode 100644
index 0000000..79185cd
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/streams/speedsummary.js
@@ -0,0 +1,215 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+var EventEmitter = require('events');
+var util = require('util');
+var azureutil = require('../util/util');
+
+/**
+* Blob upload/download speed summary.
+* Trigger 'progress' event every progress updates.
+* Instant speed is computed over a sliding window of _timeWindowInSeconds
+* one-second buckets (_speedTracks).
+*/
+function SpeedSummary (name) {
+  this.name = name;
+  this._startTime = Date.now();
+  this._timeWindowInSeconds = 10;
+  this._timeWindow = this._timeWindowInSeconds * 1000;
+  this._totalWindowSize = 0;
+  this._speedTracks = new Array(this._timeWindowInSeconds);
+  this._speedTrackPtr = 0;
+  this.totalSize = undefined;
+  this.completeSize = 0;
+}
+
+util.inherits(SpeedSummary, EventEmitter);
+
+/**
+* Convert the size to human readable size
+*/
+function toHumanReadableSize(size, len) {
+  if(!size) return '0B';
+  if (!len || len <= 0) {
+    len = 2;
+  }
+  var units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
+  var i = Math.floor( Math.log(size) / Math.log(1024));
+  return (size/Math.pow(1024, i)).toFixed(len) + units[i];
+}
+
+/**
+* Get running seconds
+* Returns 'HH:MM:SS' by default, or raw seconds when humanReadable === false.
+*/
+SpeedSummary.prototype.getElapsedSeconds = function(humanReadable) {
+  var now = Date.now();
+  var seconds = parseInt((now - this._startTime) / 1000, 10);
+  if (humanReadable !== false) {
+    var s = parseInt(seconds % 60, 10);
+    seconds /= 60;
+    var m = Math.floor(seconds % 60);
+    seconds /= 60;
+    var h = Math.floor(seconds);
+    seconds = util.format('%s:%s:%s', azureutil.zeroPaddingString(h, 2), azureutil.zeroPaddingString(m, 2), azureutil.zeroPaddingString(s, 2));
+  }
+  return seconds;
+};
+
+/**
+* Get complete percentage
+* @param {int} len The number of digits after the decimal point.
+* NOTE(review): returns a toFixed() string when totalSize > 0, but the numbers
+* 100 / 0 in the other branches — callers apparently tolerate both types.
+*/
+SpeedSummary.prototype.getCompletePercent = function(len) {
+  if (this.totalSize) {
+    if(!len || len <= 0) {
+      len = 1;
+    }
+    return (this.completeSize * 100 / this.totalSize).toFixed(len);
+  } else {
+    if(this.totalSize === 0) {
+      return 100;
+    } else {
+      return 0;
+    }
+  }
+};
+
+/**
+* Get average upload/download speed
+*/
+SpeedSummary.prototype.getAverageSpeed = function(humanReadable) {
+  var elapsedTime = this.getElapsedSeconds(false);
+  return this._getInternalSpeed(this.completeSize, elapsedTime, humanReadable);
+};
+
+/**
+* Get instant speed
+*/
+SpeedSummary.prototype.getSpeed = function(humanReadable) {
+  this._refreshSpeedTracks();
+  var elapsedTime = this.getElapsedSeconds(false);
+  elapsedTime = Math.min(elapsedTime, this._timeWindowInSeconds);
+  return this._getInternalSpeed(this._totalWindowSize, elapsedTime, humanReadable);
+};
+
+/**
+* Get internal speed
+*/
+SpeedSummary.prototype._getInternalSpeed = function(totalSize, elapsedTime, humanReadable) {
+  if (elapsedTime <= 0) {
+    elapsedTime = 1;
+  }
+  var speed = totalSize / elapsedTime;
+  if(humanReadable !== false) {
+    speed = toHumanReadableSize(speed) + '/s';
+  }
+  return speed;
+};
+
+/**
+* Refresh speed tracks
+* Drops buckets older than the time window and recomputes the window total.
+*/
+SpeedSummary.prototype._refreshSpeedTracks = function() {
+  var now = Date.now();
+  var totalSize = 0;
+  for(var i = 0; i < this._speedTracks.length; i++) {
+    if(!this._speedTracks[i]) continue;
+    if(now - this._speedTracks[i].timeStamp <= this._timeWindow) {
+      totalSize += this._speedTracks[i].size;
+    } else {
+      this._speedTracks[i] = null;
+    }
+  }
+  this._totalWindowSize = totalSize;
+};
+
+/**
+* Increment the complete data size
+* Emits 'progress' asynchronously so callers are not re-entered.
+*/
+SpeedSummary.prototype.increment = function(len) {
+  this.completeSize += len;
+  this._recordSpeed(len);
+
+  var that = this;
+  process.nextTick(function () {
+    that.emit('progress');
+  });
+
+  return this.completeSize;
+};
+
+/**
+* record complete size into speed tracks
+*/
+SpeedSummary.prototype._recordSpeed = function(completeSize) {
+  var now = Date.now();
+  var track = this._speedTracks[this._speedTrackPtr];
+  if(track) {
+    var timeDiff = now - track.timeStamp;
+    if(timeDiff > this._timeWindow) {
+      // bucket is stale for the whole window: reuse it
+      track.timeStamp = now;
+      track.size = completeSize;
+    } else if(timeDiff <= 1000) { //1 seconds
+      track.size += completeSize;
+    } else {
+      // current bucket's second has passed: advance ring pointer and retry
+      this._speedTrackPtr = (this._speedTrackPtr + 1) % this._timeWindowInSeconds;
+      this._recordSpeed(completeSize);
+    }
+  } else {
+    track = {timeStamp : now, size: completeSize};
+    this._speedTracks[this._speedTrackPtr] = track;
+  }
+};
+
+/**
+* Get auto increment function
+* Returned callback counts `size` bytes on success unless the callee reports
+* an explicit numeric byte count.
+*/
+SpeedSummary.prototype.getAutoIncrementFunction = function(size) {
+  var self = this;
+  return function(error, retValue) {
+    if(!error) {
+      var doneSize = 0;
+      if((!retValue && retValue !== 0) || isNaN(retValue)) {
+        doneSize = size;
+      } else {
+        doneSize = retValue;
+      }
+      self.increment(doneSize);
+    }
+  };
+};
+
+/**
+* Get total size
+*/
+SpeedSummary.prototype.getTotalSize = function(humanReadable) {
+  if (humanReadable !== false) {
+    return toHumanReadableSize(this.totalSize);
+  } else {
+    return this.totalSize;
+  }
+};
+
+/**
+* Get completed data size
+*/
+SpeedSummary.prototype.getCompleteSize = function(humanReadable) {
+  if (humanReadable !== false) {
+    return toHumanReadableSize(this.completeSize);
+  } else {
+    return this.completeSize;
+  }
+};
+
+module.exports = SpeedSummary;
diff --git a/src/node_modules/azure-storage/lib/common/util/accesscondition.js b/src/node_modules/azure-storage/lib/common/util/accesscondition.js
new file mode 100644
index 0000000..f89c8ef
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/common/util/accesscondition.js
@@ -0,0 +1,166 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'AccessCondition'. + +/** +* Defines constants, enums, and utility functions for use with storage access condition. +* @namespace +*/ + +'use strict'; + +exports = module.exports; + +/** +* Constructs an empty access condition. +* +* @return {object} An empty AccessCondition object +*/ +exports.generateEmptyCondition = function () { + return {}; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource does not exist on the service +* +* Setting this access condition modifies the request to include the HTTP If-None-Match conditional header + +* @return {AccessConditions} An AccessCondition object that represents a condition that checks for nonexistence +*/ +exports.generateIfNotExistsCondition = function () { + var accessCondition = {}; + accessCondition.EtagNonMatch = '*'; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource exists on the service +* +* Setting this access condition modifies the request to include the HTTP If-Match conditional header + +* @return {AccessConditions} An AccessCondition object that represents a condition that checks for existence +*/ +exports.generateIfExistsCondition = function () { + var accessCondition = {}; + accessCondition.EtagMatch = '*'; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's ETag value +* does not match the specified ETag value +* +* Setting this access condition modifies 
the request to include the HTTP If-None-Match conditional header +* +* @param {string} etag The ETag value to check against the resource's ETag +* @return {AccessConditions} An AccessCondition object that represents the If-None-Match condition +*/ +exports.generateIfNoneMatchCondition = function (etag) { + var accessCondition = {}; + accessCondition.EtagNonMatch = etag; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's ETag value +* matches the specified ETag value +* +* Setting this access condition modifies the request to include the HTTP If-Match conditional header +* +* @param {string} etag The ETag value to check against the resource's ETag +* @return {AccessConditions} An AccessCondition object that represents the If-Match condition +*/ +exports.generateIfMatchCondition = function (etag) { + var accessCondition = {}; + accessCondition.EtagMatch = etag; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource has been +* modified since the specified time +* +* Setting this access condition modifies the request to include the HTTP If-Modified-Since conditional header +* +* @param {Date|string} time A date object specifying the time since which the resource must have been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Modified-Since condition +*/ +exports.generateIfModifiedSinceCondition = function (time) { + var accessCondition = {}; + accessCondition.DateModifedSince = time; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource has not been +* modified since the specified time +* +* Setting this access condition modifies the request to include the HTTP If-Unmodified-Since conditional header +* +* @param {Date|string} time A date object specifying the time since which the resource must 
have not been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Unmodified-Since condition +*/ +exports.generateIfNotModifiedSinceCondition = function (time) { + var accessCondition = {}; + accessCondition.DateUnModifiedSince = time; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's sequence number +* is equal to the specified value +* +* Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-eq conditional header +* +* @param {Number|string} sequenceNumber The value to compare against the resource's sequence number +* @return {AccessConditions} An AccessCondition object that represents the sequence-number-equal condition +*/ +exports.generateSequenceNumberEqualCondition = function (sequenceNumber) { + var accessCondition = {}; + accessCondition.SequenceNumberEqual = sequenceNumber; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's sequence number +* is less than the specified value +* +* Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-lt conditional header +* +* @param {Number|string} sequenceNumber The value to compare against the resource's sequence number +* @return {AccessConditions} An AccessCondition object that represents the sequence-number-less-than condition +*/ +exports.generateSequenceNumberLessThanCondition = function (sequenceNumber) { + var accessCondition = {}; + accessCondition.SequenceNumberLessThan = sequenceNumber; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's sequence number +* is less than or equal to the specified value +* +* Setting this access condition modifies the request to include the HTTP 
x-ms-if-sequence-number-le conditional header +* +* @param {Number|string} sequenceNumber The value to compare against the resource's sequence number +* @return {AccessConditions} An AccessCondition object that represents the sequence-number-less-than-or-equal condition +*/ +exports.generateSequenceNumberLessThanOrEqualCondition = function (sequenceNumber) { + var accessCondition = {}; + accessCondition.SequenceNumberLessThanOrEqual = sequenceNumber; + return accessCondition; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/constants.js b/src/node_modules/azure-storage/lib/common/util/constants.js new file mode 100644 index 0000000..4d8dc80 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/constants.js @@ -0,0 +1,2607 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'Constants'. +exports = module.exports; + +var storageDnsSuffix = process.env.AZURE_STORAGE_DNS_SUFFIX || 'core.windows.net'; + +/** +* Defines constants. +*/ +var Constants = { + /* + * Specifies the value to use for UserAgent header. + * + * @const + * @type {string} + */ + USER_AGENT_PRODUCT_NAME: 'Azure-Storage', + + /* + * Specifies the value to use for UserAgent header. + * + * @const + * @type {string} + */ + USER_AGENT_PRODUCT_VERSION: '2.10.3', + + /** + * The number of default concurrent requests for parallel operation. 
+ * + * @const + * @type {int} + */ + DEFAULT_PARALLEL_OPERATION_THREAD_COUNT: 5, + + /** + * The value of default socket reuse for batch operation. + * + * @const + * @type {boolean} + */ + DEFAULT_ENABLE_REUSE_SOCKET: true, + + /** + * Constant representing a kilobyte (Non-SI version). + * + * @const + * @type {int} + */ + KB: 1024, + + /** + * Constant representing a megabyte (Non-SI version). + * + * @const + * @type {int} + */ + MB: 1024 * 1024, + + /** + * Constant representing a gigabyte (Non-SI version). + * + * @const + * @type {int} + */ + GB: 1024 * 1024 * 1024, + + /** + * Specifies HTTP. + * + * @const + * @type {string} + */ + HTTP: 'http:', + + /** + * Specifies HTTPS. + * + * @const + * @type {string} + */ + HTTPS: 'https:', + + /** + * Default HTTP port. + * + * @const + * @type {int} + */ + DEFAULT_HTTP_PORT: 80, + + /** + * Default HTTPS port. + * + * @const + * @type {int} + */ + DEFAULT_HTTPS_PORT: 443, + + /** + * Default client request timeout in milliseconds. + * Integer containing the number of milliseconds to wait for a server to send response headers (and start the response body) before aborting the request. + * 2 minutes by default. + * + * @const + * @type {int} + */ + DEFAULT_CLIENT_REQUEST_TIMEOUT_IN_MS: 120000, + + /** + * Marker for atom metadata. + * + * @const + * @type {string} + */ + XML_METADATA_MARKER: '$', + + /** + * Marker for atom value. + * + * @const + * @type {string} + */ + XML_VALUE_MARKER: '_', + + /** + * Defines the service types indicators. + * + * @const + * @enum {string} + */ + ServiceType: { + Blob: 'blob', + Queue: 'queue', + Table: 'table', + File: 'file' + }, + + /** + * Specifies the location used to indicate which location the operation can be performed against. + * + * @const + * @enum {int} + */ + RequestLocationMode: { + PRIMARY_ONLY: 0, + SECONDARY_ONLY: 1, + PRIMARY_OR_SECONDARY: 2 + }, + + /** + * Represents a storage service location. 
+ * + * @const + * @enum {int} + */ + StorageLocation: { + PRIMARY: 0, + SECONDARY: 1 + }, + + /** + * Defines constants for use with account SAS. + */ + AccountSasConstants:{ + /** + * Permission types. + * + * @const + * @enum {string} + */ + Permissions: { + READ: 'r', + ADD: 'a', + CREATE: 'c', + UPDATE: 'u', + PROCESS: 'p', + WRITE: 'w', + DELETE: 'd', + LIST: 'l' + }, + + /** + * Services types. + * + * @const + * @enum {string} + */ + Services: { + BLOB: 'b', + FILE: 'f', + QUEUE: 'q', + TABLE: 't' + }, + + /** + * Resources types. + * + * @const + * @enum {string} + */ + Resources: { + SERVICE: 's', + CONTAINER: 'c', + OBJECT: 'o' + }, + + /** + * Protocols types. + * + * @const + * @enum {string} + */ + Protocols: { + HTTPSONLY: 'https', + HTTPSORHTTP: 'https,http' + } + }, + + /** + * Defines constants for use with shared access policies. + */ + AclConstants: { + /** + * XML element for an access policy. + * + * @const + * @type {string} + */ + ACCESS_POLICY: 'AccessPolicy', + + /** + * XML element for the end time of an access policy. + * + * @const + * @type {string} + */ + EXPIRY: 'Expiry', + + /** + * XML attribute for IDs. + * + * @const + * @type {string} + */ + ID: 'Id', + + /** + * XML element for the permission of an access policy. + * + * @const + * @type {string} + */ + PERMISSION: 'Permission', + + /** + * XML element for a signed identifier. + * + * @const + * @type {string} + */ + SIGNED_IDENTIFIER_ELEMENT: 'SignedIdentifier', + + /** + * XML element for signed identifiers. + * + * @const + * @type {string} + */ + SIGNED_IDENTIFIERS_ELEMENT: 'SignedIdentifiers', + + /** + * XML element for the start time of an access policy. + * + * @const + * @type {string} + */ + START: 'Start' + }, + + /** + * Defines constants for use with service properties. + */ + ServicePropertiesConstants: { + /** + * XML element for storage service properties. 
+ * + * @const + * @type {string} + */ + STORAGE_SERVICE_PROPERTIES_ELEMENT: 'StorageServiceProperties', + + /** + * Default analytics version to send for logging, hour metrics and minute metrics. + * + * @const + * @type {string} + */ + DEFAULT_ANALYTICS_VERSION: '1.0', + + /** + * XML element for logging. + * + * @const + * @type {string} + */ + LOGGING_ELEMENT: 'Logging', + + /** + * XML element for version. + * + * @const + * @type {string} + */ + VERSION_ELEMENT: 'Version', + + /** + * XML element for delete. + * + * @const + * @type {string} + */ + DELETE_ELEMENT: 'Delete', + + /** + * XML element for read. + * + * @const + * @type {string} + */ + READ_ELEMENT: 'Read', + + /** + * XML element for write. + * + * @const + * @type {string} + */ + WRITE_ELEMENT: 'Write', + + /** + * XML element for retention policy. + * + * @const + * @type {string} + */ + RETENTION_POLICY_ELEMENT: 'RetentionPolicy', + + /** + * XML element for enabled. + * + * @const + * @type {string} + */ + ENABLED_ELEMENT: 'Enabled', + + /** + * XML element for days. + * + * @const + * @type {string} + */ + DAYS_ELEMENT: 'Days', + + /** + * XML element for HourMetrics. + * + * @const + * @type {string} + */ + HOUR_METRICS_ELEMENT: 'HourMetrics', + + /** + * XML element for MinuteMetrics. + * + * @const + * @type {string} + */ + MINUTE_METRICS_ELEMENT: 'MinuteMetrics', + + /** + * XML element for Cors. + * + * @const + * @type {string} + */ + CORS_ELEMENT: 'Cors', + + /** + * XML element for CorsRule. + * + * @const + * @type {string} + */ + CORS_RULE_ELEMENT: 'CorsRule', + + /** + * XML element for AllowedOrigins. + * + * @const + * @type {string} + */ + ALLOWED_ORIGINS_ELEMENT: 'AllowedOrigins', + + /** + * XML element for AllowedMethods. + * + * @const + * @type {string} + */ + ALLOWED_METHODS_ELEMENT: 'AllowedMethods', + + /** + * XML element for MaxAgeInSeconds. 
+ * + * @const + * @type {string} + */ + MAX_AGE_IN_SECONDS_ELEMENT: 'MaxAgeInSeconds', + + /** + * XML element for ExposedHeaders. + * + * @const + * @type {string} + */ + EXPOSED_HEADERS_ELEMENT: 'ExposedHeaders', + + /** + * XML element for AllowedHeaders. + * + * @const + * @type {string} + */ + ALLOWED_HEADERS_ELEMENT: 'AllowedHeaders', + + /** + * XML element for IncludeAPIs. + * + * @const + * @type {string} + */ + INCLUDE_APIS_ELEMENT: 'IncludeAPIs', + + /** + * XML element for DefaultServiceVersion. + * + * @const + * @type {string} + */ + DEFAULT_SERVICE_VERSION_ELEMENT: 'DefaultServiceVersion', + + /** + * XML element for DeleteRetentionPolicy. + * + * @const + * @type {string} + */ + DEFAULT_DELETE_RETENTION_POLICY_ELEMENT: 'DeleteRetentionPolicy', + + /** + * XML element for StaticWebsite. + * + * @const + * @type {string} + */ + DEFAULT_STATIC_WEBSITE_ELEMENT: 'StaticWebsite', + + /** + * XML element for StaticWebsite/IndexDocument. + * + * @const + * @type {string} + */ + DEFAULT_INDEX_DOCUMENT_ELEMENT: 'IndexDocument', + + /** + * XML element for StaticWebsite/ErrorDocument404Path. + * + * @const + * @type {string} + */ + DEFAULT_ERROR_DOCUMENT_404_PATH_ELEMENT: 'ErrorDocument404Path' + }, + + /** + * Defines constants for use with blob operations. + */ + BlobConstants: { + /** + * XML element for the latest. + * + * @const + * @type {string} + */ + LATEST_ELEMENT: 'Latest', + + /** + * XML element for uncommitted blocks. + * + * @const + * @type {string} + */ + UNCOMMITTED_ELEMENT: 'Uncommitted', + + /** + * XML element for a block list. + * + * @const + * @type {string} + */ + BLOCK_LIST_ELEMENT: 'BlockList', + + /** + * XML element for committed blocks. + * + * @const + * @type {string} + */ + COMMITTED_ELEMENT: 'Committed', + + /** + * The default write page size, in bytes, used by blob streams. 
+ * + * @const + * @type {int} + */ + DEFAULT_WRITE_PAGE_SIZE_IN_BYTES: 4 * 1024 * 1024, + + /** + * The minimum write page size, in bytes, used by blob streams. + * + * @const + * @type {int} + */ + MIN_WRITE_PAGE_SIZE_IN_BYTES: 2 * 1024 * 1024, + + /** + * The default maximum size, in bytes, of a blob before it must be separated into blocks. + * + * @const + * @type {int} + */ + DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES: 32 * 1024 * 1024, + + /** + * The default write block size, in bytes, used by blob streams. + * + * @const + * @type {int} + */ + DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES: 4 * 1024 * 1024, + + /** + * The default critical memory limitation in 32bit Node.js environment, in bytes. + * + * @const + * @type {int} + */ + DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES: 800 * 1024 * 1024, + + /** + * The default critical memory limitation in browser environment, in bytes. + * + * @const + * @type {int} + */ + DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES: 1 * 1024 * 1024 * 1024, + + /** + * The default minimum memory usage in browser environment, in bytes. + * + * @const + * @type {int} + */ + DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES: 4 * 1024 * 1024, + + /** + * The maximum size of a single block of block blob. + * + * @const + * @type {int} + */ + MAX_BLOCK_BLOB_BLOCK_SIZE: 100 * 1024 * 1024, + + /** + * The maximum size of a single block of append blob. + * + * @const + * @type {int} + */ + MAX_APPEND_BLOB_BLOCK_SIZE: 4 * 1024 * 1024, + + /** + * The maximum size, in bytes, of a blob before it must be separated into blocks. + * + * @const + * @type {int} + */ + MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES: 64 * 1024 * 1024, + + /** + * The maximum range get size when requesting for a contentMD5. + * + * @const + * @type {int} + */ + MAX_RANGE_GET_SIZE_WITH_MD5 : 4 * 1024 * 1024, + + /** + * The maximum page range size for a page update operation. 
+ * + * @const + * @type {int} + */ + MAX_UPDATE_PAGE_SIZE : 4 * 1024 * 1024, + + /** + * The maximum buffer size for writing a stream buffer. + * + * @const + * @type {int} + */ + MAX_QUEUED_WRITE_DISK_BUFFER_SIZE : 64 * 1024 * 1024, + + /** + * Max size for single get page range. The max value should be 150MB. + * http://blogs.msdn.com/b/windowsazurestorage/archive/2012/03/26/getting-the-page-ranges-of-a-large-page-blob-in-segments.aspx + * + * @const + * @type {int} + */ + MAX_SINGLE_GET_PAGE_RANGE_SIZE : 37 * 4 * 1024 * 1024, + + /** + * The size of a page, in bytes, in a page blob. + * + * @const + * @type {int} + */ + PAGE_SIZE: 512, + + /** + * Resource types. + * + * @const + * @enum {string} + */ + ResourceTypes: { + CONTAINER: 'c', + BLOB: 'b' + }, + + /** + * List blob types. + * + * @const + * @enum {string} + */ + ListBlobTypes: { + Blob: 'b', + Directory: 'd' + }, + + /** + * Put page write options + * + * @const + * @enum {string} + */ + PageWriteOptions: { + UPDATE: 'update', + CLEAR: 'clear' + }, + + /** + * Blob types + * + * @const + * @enum {string} + */ + BlobTypes: { + BLOCK: 'BlockBlob', + PAGE: 'PageBlob', + APPEND: 'AppendBlob' + }, + + /** + * Blob lease constants + * + * @const + * @enum {string} + */ + LeaseOperation: { + ACQUIRE: 'acquire', + RENEW: 'renew', + CHANGE: 'change', + RELEASE: 'release', + BREAK: 'break' + } + }, + + /** + * Defines constants for use with file operations. + */ + FileConstants: { + /** + * The default write size, in bytes, used by file streams. + * + * @const + * @type {int} + */ + DEFAULT_WRITE_SIZE_IN_BYTES: 4 * 1024 * 1024, + + /** + * The maximum range size when requesting for a contentMD5. + * + * @const + * @type {int} + */ + MAX_RANGE_GET_SIZE_WITH_MD5 : 4 * 1024 * 1024, + + /** + * The maximum range size for a file update operation. 
+ * + * @const + * @type {int} + */ + MAX_UPDATE_FILE_SIZE : 4 * 1024 * 1024, + + /** + * The default minimum size, in bytes, of a file when it must be separated into ranges. + * + * @const + * @type {int} + */ + DEFAULT_SINGLE_FILE_GET_THRESHOLD_IN_BYTES: 32 * 1024 * 1024, + + /** + * The minimum write file size, in bytes, used by file streams. + * + * @const + * @type {int} + */ + MIN_WRITE_FILE_SIZE_IN_BYTES: 2 * 1024 * 1024, + + /** + * Put range write options + * + * @const + * @enum {string} + */ + RangeWriteOptions: { + UPDATE: 'update', + CLEAR: 'clear' + }, + + /** + * Resource types. + * + * @const + * @enum {string} + */ + ResourceTypes: { + SHARE: 's', + FILE: 'f' + } + }, + + /** + * Defines constants for use with queue storage. + */ + QueueConstants: { + /** + * XML element for QueueMessage. + * + * @const + * @type {string} + */ + QUEUE_MESSAGE_ELEMENT: 'QueueMessage', + + /** + * XML element for MessageText. + * + * @const + * @type {string} + */ + MESSAGE_TEXT_ELEMENT: 'MessageText' + }, + + /** + * Defines constants for use with table storage. + */ + TableConstants: { + /** + * The changeset response delimiter. + * + * @const + * @type {string} + */ + CHANGESET_DELIMITER: '--changesetresponse_', + + /** + * The batch response delimiter. + * + * @const + * @type {string} + */ + BATCH_DELIMITER: '--batchresponse_', + + /** + * The next continuation row key token. + * + * @const + * @type {string} + */ + CONTINUATION_NEXT_ROW_KEY: 'x-ms-continuation-nextrowkey', + + /** + * The next continuation partition key token. + * + * @const + * @type {string} + */ + CONTINUATION_NEXT_PARTITION_KEY: 'x-ms-continuation-nextpartitionkey', + + /** + * The next continuation table name token. + * + * @const + * @type {string} + */ + CONTINUATION_NEXT_TABLE_NAME: 'x-ms-continuation-nexttablename', + + /** + * The next row key query string argument. 
+ * + * @const + * @type {string} + */ + NEXT_ROW_KEY: 'NextRowKey', + + /** + * The next partition key query string argument. + * + * @const + * @type {string} + */ + NEXT_PARTITION_KEY: 'NextPartitionKey', + + /** + * The next table name query string argument. + * + * @const + * @type {string} + */ + NEXT_TABLE_NAME: 'NextTableName', + + /** + * Prefix of the odata properties returned in a JSON query. + * + * @const + * @type {string} + */ + ODATA_PREFIX: 'odata.', + + /** + * Constant representing the string following a type annotation in a JSON table query. + * + * @const + * @type {string} + */ + ODATA_TYPE_SUFFIX: '@odata.type', + + /** + * Constant representing the property where the odata metadata elements are stored. + * + * @const + * @type {string} + */ + ODATA_METADATA_MARKER: '.metadata', + + /** + * Constant representing the value for an entity property. + * + * @const + * @type {string} + */ + ODATA_VALUE_MARKER: '_', + + /** + * Constant representing the type for an entity property. + * + * @const + * @type {string} + */ + ODATA_TYPE_MARKER: '$', + + /** + * The value to set the maximum data service version header. + * + * @const + * @type {string} + */ + DEFAULT_DATA_SERVICE_VERSION: '3.0;NetFx', + + /** + * The name of the property that stores the table name. + * + * @const + * @type {string} + */ + TABLE_NAME: 'TableName', + + /** + * The name of the special table used to store tables. + * + * @const + * @type {string} + */ + TABLE_SERVICE_TABLE_NAME: 'Tables', + + /** + * Operations. + * + * @const + * @enum {string} + */ + Operations: { + RETRIEVE: 'RETRIEVE', + INSERT: 'INSERT', + REPLACE: 'REPLACE', + MERGE: 'MERGE', + DELETE: 'DELETE', + INSERT_OR_REPLACE: 'INSERT_OR_REPLACE', + INSERT_OR_MERGE: 'INSERT_OR_MERGE' + } + }, + + /** + * Defines constants for use with HTTP headers. + */ + HeaderConstants: { + /** + * The accept ranges header. 
+ * + * @const + * @type {string} + */ + ACCEPT_RANGES: 'accept_ranges', + + /** + * The content transfer encoding header. + * + * @const + * @type {string} + */ + CONTENT_TRANSFER_ENCODING: 'content-transfer-encoding', + + /** + * The transfer encoding header. + * + * @const + * @type {string} + */ + TRANSFER_ENCODING: 'transfer-encoding', + + /** + * The server header. + * + * @const + * @type {string} + */ + SERVER: 'server', + + /** + * The location header. + * + * @const + * @type {string} + */ + LOCATION: 'location', + + /** + * The Last-Modified header. + * + * @const + * @type {string} + */ + LAST_MODIFIED: 'Last-Modified', + + /** + * The creation time header. + * + * @const + * @type {string} + */ + CREATION_TIME: 'x-ms-creation-time', + + /** + * The data service version. + * + * @const + * @type {string} + */ + DATA_SERVICE_VERSION: 'dataserviceversion', + + /** + * The maximum data service version. + * + * @const + * @type {string} + */ + MAX_DATA_SERVICE_VERSION: 'maxdataserviceversion', + + /** + * The master Windows Azure Storage header prefix. + * + * @const + * @type {string} + */ + PREFIX_FOR_STORAGE: 'x-ms-', + + /** + * The client request Id header. + * + * @const + * @type {string} + */ + CLIENT_REQUEST_ID: 'x-ms-client-request-id', + + /** + * The header that specifies the approximate message count of a queue. + * + * @const + * @type {string} + */ + APPROXIMATE_MESSAGES_COUNT: 'x-ms-approximate-messages-count', + + /** + * The Authorization header. + * + * @const + * @type {string} + */ + AUTHORIZATION: 'authorization', + + /** + * The header that is used to avoid browser cache. + * + * @const + * @type {string} + */ + FORCE_NO_CACHE_IN_BROWSER: '_', + + /** + * The header that specifies public access to blobs. + * + * @const + * @type {string} + */ + BLOB_PUBLIC_ACCESS: 'x-ms-blob-public-access', + + /** + * The header that specifies container immutability policy. 
+ * + * @const + * @type {boolean} + */ + HAS_IMMUTABILITY_POLICY: 'x-ms-has-immutability-policy', + + /** + * The header that specifies container has legal hold. + * + * @const + * @type {boolean} + */ + HAS_LEGAL_HOLD: 'x-ms-has-legal-hold', + + /** + * The header for the blob type. + * + * @const + * @type {string} + */ + BLOB_TYPE: 'x-ms-blob-type', + + /** + * The header for the type. + * + * @const + * @type {string} + */ + TYPE: 'x-ms-type', + + /** + * Specifies the block blob type. + * + * @const + * @type {string} + */ + BLOCK_BLOB: 'blockblob', + + /** + * The CacheControl header. + * + * @const + * @type {string} + */ + CACHE_CONTROL: 'cache-control', + + /** + * The header that specifies blob caching control. + * + * @const + * @type {string} + */ + BLOB_CACHE_CONTROL: 'x-ms-blob-cache-control', + + /** + * The header that specifies caching control. + * + * @const + * @type {string} + */ + FILE_CACHE_CONTROL: 'x-ms-cache-control', + + /** + * The copy status. + * + * @const + * @type {string} + */ + COPY_STATUS: 'x-ms-copy-status', + + /** + * The copy completion time + * + * @const + * @type {string} + */ + COPY_COMPLETION_TIME: 'x-ms-copy-completion-time', + + /** + * The copy status message + * + * @const + * @type {string} + */ + COPY_STATUS_DESCRIPTION: 'x-ms-copy-status-description', + + /** + * The copy identifier. + * + * @const + * @type {string} + */ + COPY_ID: 'x-ms-copy-id', + + /** + * Progress of any copy operation + * + * @const + * @type {string} + */ + COPY_PROGRESS: 'x-ms-copy-progress', + + /** + * The copy action. + * + * @const + * @type {string} + */ + COPY_ACTION: 'x-ms-copy-action', + + /** + * Flag if the blob is incremental copy blob. + * + * @const + * @type {string} + */ + INCREMENTAL_COPY: 'x-ms-incremental-copy', + + /** + * Snapshot time of the last successful incremental copy snapshot for this blob. 
+ * + * @const + * @type {string} + */ + COPY_DESTINATION_SNAPSHOT: 'x-ms-copy-destination-snapshot', + + /** + * The ContentID header. + * + * @const + * @type {string} + */ + CONTENT_ID: 'content-id', + + /** + * The ContentEncoding header. + * + * @const + * @type {string} + */ + CONTENT_ENCODING: 'content-encoding', + + /** + * The header that specifies blob content encoding. + * + * @const + * @type {string} + */ + BLOB_CONTENT_ENCODING: 'x-ms-blob-content-encoding', + + /** + * The header that specifies content encoding. + * + * @const + * @type {string} + */ + FILE_CONTENT_ENCODING: 'x-ms-content-encoding', + + /** + * The ContentLangauge header. + * + * @const + * @type {string} + */ + CONTENT_LANGUAGE: 'content-language', + + /** + * The header that specifies blob content language. + * + * @const + * @type {string} + */ + BLOB_CONTENT_LANGUAGE: 'x-ms-blob-content-language', + + /** + * The header that specifies content language. + * + * @const + * @type {string} + */ + FILE_CONTENT_LANGUAGE: 'x-ms-content-language', + + /** + * The ContentLength header. + * + * @const + * @type {string} + */ + CONTENT_LENGTH: 'content-length', + + /** + * The header that specifies blob content length. + * + * @const + * @type {string} + */ + BLOB_CONTENT_LENGTH: 'x-ms-blob-content-length', + + /** + * The header that specifies content length. + * + * @const + * @type {string} + */ + FILE_CONTENT_LENGTH: 'x-ms-content-length', + + /** + * The ContentDisposition header. + * @const + * @type {string} + */ + CONTENT_DISPOSITION: 'content-disposition', + + /** + * The header that specifies blob content disposition. + * + * @const + * @type {string} + */ + BLOB_CONTENT_DISPOSITION: 'x-ms-blob-content-disposition', + + /** + * The header that specifies content disposition. + * + * @const + * @type {string} + */ + FILE_CONTENT_DISPOSITION: 'x-ms-content-disposition', + + /** + * The ContentMD5 header. 
+ * + * @const + * @type {string} + */ + CONTENT_MD5: 'content-md5', + + /** + * The header that specifies blob content MD5. + * + * @const + * @type {string} + */ + BLOB_CONTENT_MD5: 'x-ms-blob-content-md5', + + /** + * The header that specifies content MD5. + * + * @const + * @type {string} + */ + FILE_CONTENT_MD5: 'x-ms-content-md5', + + /** + * The ContentRange header. + * + * @const + * @type {string} + */ + CONTENT_RANGE: 'cache-range', + + /** + * The ContentType header. + * + * @const + * @type {string} + */ + CONTENT_TYPE: 'content-type', + + /** + * The header that specifies blob content type. + * + * @const + * @type {string} + */ + BLOB_CONTENT_TYPE: 'x-ms-blob-content-type', + + /** + * The header that specifies content type. + * + * @const + * @type {string} + */ + FILE_CONTENT_TYPE: 'x-ms-content-type', + + /** + * The header for copy source. + * + * @const + * @type {string} + */ + COPY_SOURCE: 'x-ms-copy-source', + + /** + * The header that specifies the date. + * + * @const + * @type {string} + */ + DATE: 'date', + + /** + * The header that specifies the date. + * + * @const + * @type {string} + */ + MS_DATE: 'x-ms-date', + + /** + * The header to delete snapshots. + * + * @const + * @type {string} + */ + DELETE_SNAPSHOT: 'x-ms-delete-snapshots', + + /** + * The ETag header. + * + * @const + * @type {string} + */ + ETAG: 'etag', + + /** + * The IfMatch header. + * + * @const + * @type {string} + */ + IF_MATCH: 'if-match', + + /** + * The IfModifiedSince header. + * + * @const + * @type {string} + */ + IF_MODIFIED_SINCE: 'if-modified-since', + + /** + * The IfNoneMatch header. + * + * @const + * @type {string} + */ + IF_NONE_MATCH: 'if-none-match', + + /** + * The IfUnmodifiedSince header. + * + * @const + * @type {string} + */ + IF_UNMODIFIED_SINCE: 'if-unmodified-since', + + /** + * Specifies snapshots are to be included. 
+ * + * @const + * @type {string} + */ + INCLUDE_SNAPSHOTS_VALUE: 'include', + + /** + * Specifies that the content-type is JSON. + * + * @const + * @type {string} + */ + JSON_CONTENT_TYPE_VALUE: 'application/json', + + /** + * The header that specifies storage SKU, also known as account type. + * + * @const + * @type {string} + */ + SKU_NAME: 'x-ms-sku-name', + + /** + * The header that describes the flavour of the storage account, also known as account kind. + * + * @const + * @type {string} + */ + ACCOUNT_KIND: 'x-ms-account-kind', + + /** + * The header that specifies lease ID. + * + * @const + * @type {string} + */ + LEASE_ID: 'x-ms-lease-id', + + /** + * The header that specifies the lease break period. + * + * @const + * @type {string} + */ + LEASE_BREAK_PERIOD: 'x-ms-lease-break-period', + + /** + * The header that specifies the proposed lease identifier. + * + * @const + * @type {string} + */ + PROPOSED_LEASE_ID: 'x-ms-proposed-lease-id', + + /** + * The header that specifies the lease duration. + * + * @const + * @type {string} + */ + LEASE_DURATION: 'x-ms-lease-duration', + + /** + * The header that specifies the source lease ID. + * + * @const + * @type {string} + */ + SOURCE_LEASE_ID: 'x-ms-source-lease-id', + + /** + * The header that specifies lease time. + * + * @const + * @type {string} + */ + LEASE_TIME: 'x-ms-lease-time', + + /** + * The header that specifies lease status. + * + * @const + * @type {string} + */ + LEASE_STATUS: 'x-ms-lease-status', + + /** + * The header that specifies lease state. + * + * @const + * @type {string} + */ + LEASE_STATE: 'x-ms-lease-state', + + /** + * Specifies the page blob type. + * + * @const + * @type {string} + */ + PAGE_BLOB: 'PageBlob', + + /** + * The header that specifies page write mode. + * + * @const + * @type {string} + */ + PAGE_WRITE: 'x-ms-page-write', + + /** + * The header that specifies file range write mode. 
+ * + * @const + * @type {string} + */ + FILE_WRITE: 'x-ms-write', + + /** + * The header that specifies whether the response should include the inserted entity. + * + * @const + * @type {string} + */ + PREFER: 'Prefer', + + /** + * The header value which specifies that the response should include the inserted entity. + * + * @const + * @type {string} + */ + PREFER_CONTENT: 'return-content', + + /** + * The header value which specifies that the response should not include the inserted entity. + * + * @const + * @type {string} + */ + PREFER_NO_CONTENT: 'return-no-content', + + /** + * The header prefix for metadata. + * + * @const + * @type {string} + */ + PREFIX_FOR_STORAGE_METADATA: 'x-ms-meta-', + + /** + * The header prefix for properties. + * + * @const + * @type {string} + */ + PREFIX_FOR_STORAGE_PROPERTIES: 'x-ms-prop-', + + /** + * The Range header. + * + * @const + * @type {string} + */ + RANGE: 'Range', + + /** + * The Source Range header. + * + * @const + * @type {string} + */ + SOURCE_RANGE: 'x-ms-source-range', + + /** + * The header that specifies if the request will populate the ContentMD5 header for range gets. + * + * @const + * @type {string} + */ + RANGE_GET_CONTENT_MD5: 'x-ms-range-get-content-md5', + + /** + * The format string for specifying ranges. + * + * @const + * @type {string} + */ + RANGE_HEADER_FORMAT: 'bytes:%d-%d', + + /** + * The header that indicates the request ID. + * + * @const + * @type {string} + */ + REQUEST_ID: 'x-ms-request-id', + + /** + * The header for specifying the sequence number. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER: 'x-ms-blob-sequence-number', + + /** + * The header for specifying the If-Sequence-Number-EQ condition. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_EQUAL: 'x-ms-if-sequence-number-eq', + + /** + * The header for specifying the If-Sequence-Number-LT condition. 
+ * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_LESS_THAN: 'x-ms-if-sequence-number-lt', + + /** + * The header for specifying the If-Sequence-Number-LE condition. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_LESS_THAN_OR_EQUAL: 'x-ms-if-sequence-number-le', + + /** + * The header that specifies sequence number action. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_ACTION: 'x-ms-sequence-number-action', + + /** + * The header for the blob content length. + * + * @const + * @type {string} + */ + SIZE: 'x-ms-blob-content-length', + + /** + * The header for snapshots. + * + * @const + * @type {string} + */ + SNAPSHOT: 'x-ms-snapshot', + + /** + * Specifies only snapshots are to be included. + * + * @const + * @type {string} + */ + SNAPSHOTS_ONLY_VALUE: 'only', + + /** + * The header for the If-Match condition. + * + * @const + * @type {string} + */ + SOURCE_IF_MATCH: 'x-ms-source-if-match', + + /** + * The header for the If-Modified-Since condition. + * + * @const + * @type {string} + */ + SOURCE_IF_MODIFIED_SINCE: 'x-ms-source-if-modified-since', + + /** + * The header for the If-None-Match condition. + * + * @const + * @type {string} + */ + SOURCE_IF_NONE_MATCH: 'x-ms-source-if-none-match', + + /** + * The header for the If-Unmodified-Since condition. + * + * @const + * @type {string} + */ + SOURCE_IF_UNMODIFIED_SINCE: 'x-ms-source-if-unmodified-since', + + /** + * The header for data ranges. + * + * @const + * @type {string} + */ + STORAGE_RANGE: 'x-ms-range', + + /** + * The header for storage version. + * + * @const + * @type {string} + */ + STORAGE_VERSION: 'x-ms-version', + + /** + * The current storage version header value. + * + * @const + * @type {string} + */ + TARGET_STORAGE_VERSION: '2018-03-28', + + /** + * The UserAgent header. + * + * @const + * @type {string} + */ + USER_AGENT: 'user-agent', + + /** + * The pop receipt header. 
+ * + * @const + * @type {string} + */ + POP_RECEIPT: 'x-ms-popreceipt', + + /** + * The time next visibile header. + * + * @const + * @type {string} + */ + TIME_NEXT_VISIBLE: 'x-ms-time-next-visible', + + /** + * The approximate message counter header. + * + * @const + * @type {string} + */ + APPROXIMATE_MESSAGE_COUNT: 'x-ms-approximate-message-count', + + /** + * The lease action header. + * + * @const + * @type {string} + */ + LEASE_ACTION: 'x-ms-lease-action', + + /** + * The accept header. + * + * @const + * @type {string} + */ + ACCEPT: 'accept', + + /** + * The accept charset header. + * + * @const + * @type {string} + */ + ACCEPT_CHARSET: 'Accept-Charset', + + /** + * The host header. + * + * @const + * @type {string} + */ + HOST: 'host', + + /** + * The correlation identifier header. + * + * @const + * @type {string} + */ + CORRELATION_ID: 'x-ms-correlation-id', + + /** + * The group identifier header. + * + * @const + * @type {string} + */ + GROUP_ID: 'x-ms-group-id', + + /** + * The share quota header. + * + * @const + * @type {string} + */ + SHARE_QUOTA: 'x-ms-share-quota', + + /** + * The max blob size header. + * + * @const + * @type {string} + */ + BLOB_CONDITION_MAX_SIZE: 'x-ms-blob-condition-maxsize', + + /** + * The append blob position header. + * + * @const + * @type {string} + */ + BLOB_CONDITION_APPEND_POSITION: 'x-ms-blob-condition-appendpos', + + /** + * The append blob append offset header. + * + * @const + * @type {string} + */ + BLOB_APPEND_OFFSET: 'x-ms-blob-append-offset', + + /** + * The append blob committed block header. + * + * @const + * @type {string} + */ + BLOB_COMMITTED_BLOCK_COUNT: 'x-ms-blob-committed-block-count', + + /** + * If the contents of the request have been successfully encrypted using the specified algorithm. + * + * @const + * @type {string} + */ + REQUEST_SERVER_ENCRYPTED: 'x-ms-request-server-encrypted', + + /** + * If the data and application metadata are completely encrypted using the specified algorithm. 
+ * + * @const + * @type {string} + */ + SERVER_ENCRYPTED: 'x-ms-server-encrypted', + + /** + * Header indicates the resulting tier of the blob. + * + * @const + * @type {string} + */ + ACCESS_TIER: 'x-ms-access-tier', + + /** + * This is the datetime of when the last time tier was changed on the blob. + * + * @const + * @type {string} + */ + ACCESS_TIER_CHANGE_TIME: 'x-ms-access-tier-change-time', + + /** + * If the access tier is not explicitly set on the blob, + * the tier is inferred based on its content length + * and this header will be returned with true value. + * + * @const + * @type {string} + */ + ACCESS_TIER_INFERRED: 'x-ms-access-tier-inferred', + + /** + * For BlobStorage accounts, the header is returned if archive tier is set + * and rehydrate operation is pending for the request version is 2017-04-17 or later. + * The valid values are rehydrate-pending-to-hot or rehydrate-pending-to-cool. + * + * @const + * @type {string} + */ + ARCHIVE_STATUS: 'x-ms-archive-status' + }, + + QueryStringConstants: { + + /** + * Query component for SAS API version. + * @const + * @type {string} + */ + API_VERSION: 'api-version', + + /** + * The Comp value. + * + * @const + * @type {string} + */ + COMP: 'comp', + + /** + * The Res Type. + * + * @const + * @type {string} + */ + RESTYPE: 'restype', + + /** + * The copy Id. + * @const + * @type {string} + */ + COPY_ID: 'copyid', + + /** + * The snapshot value. + * + * @const + * @type {string} + */ + SNAPSHOT: 'snapshot', + + /** + * The share snapshot value. + * + * @const + * @type {string} + */ + SHARE_SNAPSHOT: 'sharesnapshot', + + /** + * The previous snapshot value. + * + * @const + * @type {string} + */ + PREV_SNAPSHOT: 'prevsnapshot', + + /** + * The timeout value. + * + * @const + * @type {string} + */ + TIMEOUT: 'timeout', + + /** + * The signed start time query string argument for shared access signature. 
+ * + * @const + * @type {string} + */ + SIGNED_START: 'st', + + /** + * The signed expiry time query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_EXPIRY: 'se', + + /** + * The signed resource query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_RESOURCE: 'sr', + + /** + * The signed permissions query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_PERMISSIONS: 'sp', + + /** + * The signed services query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_SERVICES: 'ss', + + /** + * The signed resource types query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_RESOURCE_TYPES: 'srt', + + /** + * The signed IP query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_IP: 'sip', + + /** + * The signed protocol query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_PROTOCOL: 'spr', + + /** + * The signed identifier query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_IDENTIFIER: 'si', + + /** + * The signature query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNATURE: 'sig', + + /** + * The signed version argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_VERSION: 'sv', + + /** + * The cache control argument for shared access signature. + * + * @const + * @type {string} + */ + CACHE_CONTROL: 'rscc', + + /** + * The content type argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_TYPE: 'rsct', + + /** + * The content encoding argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_ENCODING: 'rsce', + + /** + * The content language argument for shared access signature. 
+ * + * @const + * @type {string} + */ + CONTENT_LANGUAGE: 'rscl', + + /** + * The content disposition argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_DISPOSITION: 'rscd', + + /** + * The block identifier query string argument for blob service. + * + * @const + * @type {string} + */ + BLOCK_ID: 'blockid', + + /** + * The block list type query string argument for blob service. + * + * @const + * @type {string} + */ + BLOCK_LIST_TYPE: 'blocklisttype', + + /** + * The prefix query string argument for listing operations. + * + * @const + * @type {string} + */ + PREFIX: 'prefix', + + /** + * The marker query string argument for listing operations. + * + * @const + * @type {string} + */ + MARKER: 'marker', + + /** + * The maxresults query string argument for listing operations. + * + * @const + * @type {string} + */ + MAX_RESULTS: 'maxresults', + + /** + * The delimiter query string argument for listing operations. + * + * @const + * @type {string} + */ + DELIMITER: 'delimiter', + + /** + * The include query string argument for listing operations. + * + * @const + * @type {string} + */ + INCLUDE: 'include', + + /** + * The peekonly query string argument for queue service. + * + * @const + * @type {string} + */ + PEEK_ONLY: 'peekonly', + + /** + * The numofmessages query string argument for queue service. + * + * @const + * @type {string} + */ + NUM_OF_MESSAGES: 'numofmessages', + + /** + * The popreceipt query string argument for queue service. + * + * @const + * @type {string} + */ + POP_RECEIPT: 'popreceipt', + + /** + * The visibilitytimeout query string argument for queue service. + * + * @const + * @type {string} + */ + VISIBILITY_TIMEOUT: 'visibilitytimeout', + + /** + * The messagettl query string argument for queue service. + * + * @const + * @type {string} + */ + MESSAGE_TTL: 'messagettl', + + /** + * The select query string argument. 
+ * + * @const + * @type {string} + */ + SELECT: '$select', + + /** + * The filter query string argument. + * + * @const + * @type {string} + */ + FILTER: '$filter', + + /** + * The top query string argument. + * + * @const + * @type {string} + */ + TOP: '$top', + + /** + * The skip query string argument. + * + * @const + * @type {string} + */ + SKIP: '$skip', + + /** + * The next partition key query string argument for table service. + * + * @const + * @type {string} + */ + NEXT_PARTITION_KEY: 'NextPartitionKey', + + /** + * The next row key query string argument for table service. + * + * @const + * @type {string} + */ + NEXT_ROW_KEY: 'NextRowKey', + + /** + * The lock identifier for service bus messages. + * + * @const + * @type {string} + */ + LOCK_ID: 'lockid', + + /** + * The table name for table SAS URI's. + * + * @const + * @type {string} + */ + TABLENAME: 'tn', + + /** + * The starting Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + STARTPK: 'spk', + + /** + * The starting Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + STARTRK: 'srk', + + /** + * The ending Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + ENDPK: 'epk', + + /** + * The ending Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + ENDRK: 'erk' + }, + + StorageServiceClientConstants: { + /** + * The default protocol. + * + * @const + * @type {string} + */ + DEFAULT_PROTOCOL: 'https:', + + /* + * Used environment variables. + * + * @const + * @enum {string} + */ + EnvironmentVariables: { + AZURE_STORAGE_ACCOUNT: 'AZURE_STORAGE_ACCOUNT', + AZURE_STORAGE_ACCESS_KEY: 'AZURE_STORAGE_ACCESS_KEY', + AZURE_STORAGE_DNS_SUFFIX: 'AZURE_STORAGE_DNS_SUFFIX', + AZURE_STORAGE_CONNECTION_STRING: 'AZURE_STORAGE_CONNECTION_STRING', + HTTP_PROXY: 'HTTP_PROXY', + HTTPS_PROXY: 'HTTPS_PROXY', + EMULATED: 'EMULATED' + }, + + /** + * Default credentials. 
+ */ + DEVSTORE_STORAGE_ACCOUNT: 'devstoreaccount1', + DEVSTORE_STORAGE_ACCESS_KEY: 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + + /** + * The development store URI. + * + * @const + * @type {string} + */ + DEV_STORE_URI: 'http://127.0.0.1', + + /** + * Development ServiceClient URLs. + */ + DEVSTORE_DEFAULT_PROTOCOL: 'http://', + DEVSTORE_BLOB_HOST: '127.0.0.1:10000', + DEVSTORE_QUEUE_HOST: '127.0.0.1:10001', + DEVSTORE_TABLE_HOST: '127.0.0.1:10002', + + /** + * Production ServiceClient URLs. + */ + CLOUD_BLOB_HOST: 'blob.' + storageDnsSuffix, + CLOUD_QUEUE_HOST: 'queue.' + storageDnsSuffix, + CLOUD_TABLE_HOST: 'table.' + storageDnsSuffix, + CLOUD_FILE_HOST: 'file.' + storageDnsSuffix + }, + + HttpConstants: { + /** + * Http Verbs + * + * @const + * @enum {string} + */ + HttpVerbs: { + PUT: 'PUT', + GET: 'GET', + DELETE: 'DELETE', + POST: 'POST', + MERGE: 'MERGE', + HEAD: 'HEAD' + }, + + /** + * Response codes. + * + * @const + * @enum {int} + */ + HttpResponseCodes: { + Ok: 200, + Created: 201, + Accepted: 202, + NoContent: 204, + PartialContent: 206, + BadRequest: 400, + Unauthorized: 401, + Forbidden: 403, + NotFound: 404, + Conflict: 409, + LengthRequired: 411, + PreconditionFailed: 412 + } + }, + + CompatibleVersionConstants: { + /** + * Constant for the 2013-08-15 version. + * + * @const + * @type {string} + */ + AUGUST_2013: '2013-08-15', + + /** + * Constant for the 2012-02-12 version. 
+ * + * @const + * @type {string} + */ + FEBRUARY_2012: '2012-02-12' + }, + + BlobErrorCodeStrings: { + INVALID_BLOCK_ID: 'InvalidBlockId', + BLOB_NOT_FOUND: 'BlobNotFound', + BLOB_ALREADY_EXISTS: 'BlobAlreadyExists', + CONTAINER_ALREADY_EXISTS: 'ContainerAlreadyExists', + CONTAINER_NOT_FOUND: 'ContainerNotFound', + INVALID_BLOB_OR_BLOCK: 'InvalidBlobOrBlock', + INVALID_BLOCK_LIST: 'InvalidBlockList' + }, + + FileErrorCodeStrings: { + SHARE_ALREADY_EXISTS: 'ShareAlreadyExists', + SHARE_NOT_FOUND: 'ShareNotFound', + FILE_NOT_FOUND: 'FileNotFound' + }, + + QueueErrorCodeStrings: { + QUEUE_NOT_FOUND: 'QueueNotFound', + QUEUE_DISABLED: 'QueueDisabled', + QUEUE_ALREADY_EXISTS: 'QueueAlreadyExists', + QUEUE_NOT_EMPTY: 'QueueNotEmpty', + QUEUE_BEING_DELETED: 'QueueBeingDeleted', + POP_RECEIPT_MISMATCH: 'PopReceiptMismatch', + INVALID_PARAMETER: 'InvalidParameter', + MESSAGE_NOT_FOUND: 'MessageNotFound', + MESSAGE_TOO_LARGE: 'MessageTooLarge', + INVALID_MARKER: 'InvalidMarker' + }, + + /** + * Constants for storage error strings + * + * More details are at: http://msdn.microsoft.com/en-us/library/azure/dd179357.aspx + */ + StorageErrorCodeStrings: { + // Not Modified (304): The condition specified in the conditional header(s) was not met for a read operation. + // Precondition Failed (412): The condition specified in the conditional header(s) was not met for a write operation. + CONDITION_NOT_MET: 'ConditionNotMet', + // Bad Request (400): A required HTTP header was not specified. + MISSING_REQUIRED_HEADER: 'MissingRequiredHeader', + // Bad Request (400): A required XML node was not specified in the request body. + MISSING_REQUIRED_XML_NODE: 'MissingRequiredXmlNode', + // Bad Request (400): One of the HTTP headers specified in the request is not supported. + UNSUPPORTED_HEADER: 'UnsupportedHeader', + // Bad Request (400): One of the XML nodes specified in the request body is not supported. 
+ UNSUPPORTED_XML_NODE: 'UnsupportedXmlNode', + // Bad Request (400): The value provided for one of the HTTP headers was not in the correct format. + INVALID_HEADER_VALUE: 'InvalidHeaderValue', + // Bad Request (400): The value provided for one of the XML nodes in the request body was not in the correct format. + INVALID_XML_NODE_VALUE: 'InvalidXmlNodeValue', + // Bad Request (400): A required query parameter was not specified for this request. + MISSING_REQUIRED_QUERY_PARAMETER: 'MissingRequiredQueryParameter', + // Bad Request (400): One of the query parameters specified in the request URI is not supported. + UNSUPPORTED_QUERY_PARAMETER: 'UnsupportedQueryParameter', + // Bad Request (400): An invalid value was specified for one of the query parameters in the request URI. + INVALID_QUERY_PARAMETER_VALUE: 'InvalidQueryParameterValue', + // Bad Request (400): A query parameter specified in the request URI is outside the permissible range. + OUT_OF_RANGE_QUERY_PARAMETER_VALUE: 'OutOfRangeQueryParameterValue', + // Bad Request (400): The url in the request could not be parsed. + REQUEST_URL_FAILED_TO_PARSE: 'RequestUrlFailedToParse', + // Bad Request (400): The requested URI does not represent any resource on the server. + INVALID_URI: 'InvalidUri', + // Bad Request (400): The HTTP verb specified was not recognized by the server. + INVALID_HTTP_VERB: 'InvalidHttpVerb', + // Bad Request (400): The key for one of the metadata key-value pairs is empty. + EMPTY_METADATA_KEY: 'EmptyMetadataKey', + // Bad Request (400): The specified XML is not syntactically valid. + INVALID_XML_DOCUMENT: 'InvalidXmlDocument', + // Bad Request (400): The MD5 value specified in the request did not match the MD5 value calculated by the server. + MD5_MISMATCH: 'Md5Mismatch', + // Bad Request (400): The MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded. + INVALID_MD5: 'InvalidMd5', + // Bad Request (400): One of the request inputs is out of range. 
+ OUT_OF_RANGE_INPUT: 'OutOfRangeInput', + // Bad Request (400): The authentication information was not provided in the correct format. Verify the value of Authorization header. + INVALID_AUTHENTICATION_INFO: 'InvalidAuthenticationInfo', + // Bad Request (400): One of the request inputs is not valid. + INVALID_INPUT: 'InvalidInput', + // Bad Request (400): The specified metadata is invalid. It includes characters that are not permitted. + INVALID_METADATA: 'InvalidMetadata', + // Bad Request (400): The specifed resource name contains invalid characters. + INVALID_RESOURCE_NAME: 'InvalidResourceName', + // Bad Request (400): The size of the specified metadata exceeds the maximum size permitted. + METADATA_TOO_LARGE: 'MetadataTooLarge', + // Bad Request (400): Condition headers are not supported. + CONDITION_HEADER_NOT_SUPPORTED: 'ConditionHeadersNotSupported', + // Bad Request (400): Multiple condition headers are not supported. + MULTIPLE_CONDITION_HEADER_NOT_SUPPORTED: 'MultipleConditionHeadersNotSupported', + // Forbidden (403): Server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature. + AUTHENTICATION_FAILED: 'AuthenticationFailed', + // Forbidden (403): Read-access geo-redundant replication is not enabled for the account. + // Forbidden (403): Write operations to the secondary location are not allowed. + // Forbidden (403): The account being accessed does not have sufficient permissions to execute this operation. + INSUFFICIENT_ACCOUNT_PERMISSIONS: 'InsufficientAccountPermissions', + // Not Found (404): The specified resource does not exist. + RESOURCE_NOT_FOUND: 'ResourceNotFound', + // Forbidden (403): The specified account is disabled. + ACCOUNT_IS_DISABLED: 'AccountIsDisabled', + // Method Not Allowed (405): The resource doesn't support the specified HTTP verb. + UNSUPPORTED_HTTP_VERB: 'UnsupportedHttpVerb', + // Conflict (409): The specified account already exists. 
+ ACCOUNT_ALREADY_EXISTS: 'AccountAlreadyExists', + // Conflict (409): The specified account is in the process of being created. + ACCOUNT_BEING_CREATED: 'AccountBeingCreated', + // Conflict (409): The specified resource already exists. + RESOURCE_ALREADY_EXISTS: 'ResourceAlreadyExists', + // Conflict (409): The specified resource type does not match the type of the existing resource. + RESOURCE_TYPE_MISMATCH: 'ResourceTypeMismatch', + // Length Required (411): The Content-Length header was not specified. + MISSING_CONTENT_LENGTH_HEADER: 'MissingContentLengthHeader', + // Request Entity Too Large (413): The size of the request body exceeds the maximum size permitted. + REQUEST_BODY_TOO_LARGE: 'RequestBodyTooLarge', + // Requested Range Not Satisfiable (416): The range specified is invalid for the current size of the resource. + INVALID_RANGE: 'InvalidRange', + // Internal Server Error (500): The server encountered an internal error. Please retry the request. + INTERNAL_ERROR: 'InternalError', + // Internal Server Error (500): The operation could not be completed within the permitted time. + OPERATION_TIMED_OUT: 'OperationTimedOut', + // Service Unavailable (503): The server is currently unable to receive requests. Please retry your request. 
+ SERVER_BUSY: 'ServerBusy', + + // Legacy error code strings + UPDATE_CONDITION_NOT_SATISFIED: 'UpdateConditionNotSatisfied', + CONTAINER_NOT_FOUND: 'ContainerNotFound', + CONTAINER_ALREADY_EXISTS: 'ContainerAlreadyExists', + CONTAINER_DISABLED: 'ContainerDisabled', + CONTAINER_BEING_DELETED: 'ContainerBeingDeleted' + }, + + TableErrorCodeStrings: { + XMETHOD_NOT_USING_POST: 'XMethodNotUsingPost', + XMETHOD_INCORRECT_VALUE: 'XMethodIncorrectValue', + XMETHOD_INCORRECT_COUNT: 'XMethodIncorrectCount', + TABLE_HAS_NO_PROPERTIES: 'TableHasNoProperties', + DUPLICATE_PROPERTIES_SPECIFIED: 'DuplicatePropertiesSpecified', + TABLE_HAS_NO_SUCH_PROPERTY: 'TableHasNoSuchProperty', + DUPLICATE_KEY_PROPERTY_SPECIFIED: 'DuplicateKeyPropertySpecified', + TABLE_ALREADY_EXISTS: 'TableAlreadyExists', + TABLE_NOT_FOUND: 'TableNotFound', + ENTITY_NOT_FOUND: 'EntityNotFound', + ENTITY_ALREADY_EXISTS: 'EntityAlreadyExists', + PARTITION_KEY_NOT_SPECIFIED: 'PartitionKeyNotSpecified', + OPERATOR_INVALID: 'OperatorInvalid', + UPDATE_CONDITION_NOT_SATISFIED: 'UpdateConditionNotSatisfied', + PROPERTIES_NEED_VALUE: 'PropertiesNeedValue', + PARTITION_KEY_PROPERTY_CANNOT_BE_UPDATED: 'PartitionKeyPropertyCannotBeUpdated', + TOO_MANY_PROPERTIES: 'TooManyProperties', + ENTITY_TOO_LARGE: 'EntityTooLarge', + PROPERTY_VALUE_TOO_LARGE: 'PropertyValueTooLarge', + INVALID_VALUE_TYPE: 'InvalidValueType', + TABLE_BEING_DELETED: 'TableBeingDeleted', + TABLE_SERVER_OUT_OF_MEMORY: 'TableServerOutOfMemory', + PRIMARY_KEY_PROPERTY_IS_INVALID_TYPE: 'PrimaryKeyPropertyIsInvalidType', + PROPERTY_NAME_TOO_LONG: 'PropertyNameTooLong', + PROPERTY_NAME_INVALID: 'PropertyNameInvalid', + BATCH_OPERATION_NOT_SUPPORTED: 'BatchOperationNotSupported', + JSON_FORMAT_NOT_SUPPORTED: 'JsonFormatNotSupported', + METHOD_NOT_ALLOWED: 'MethodNotAllowed', + NOT_IMPLEMENTED: 'NotImplemented' + }, + + ConnectionStringKeys: { + USE_DEVELOPMENT_STORAGE_NAME: 'UseDevelopmentStorage', + DEVELOPMENT_STORAGE_PROXY_URI_NAME: 
'DevelopmentStorageProxyUri', + DEFAULT_ENDPOINTS_PROTOCOL_NAME: 'DefaultEndpointsProtocol', + ACCOUNT_NAME_NAME: 'AccountName', + ACCOUNT_KEY_NAME: 'AccountKey', + BLOB_ENDPOINT_NAME: 'BlobEndpoint', + FILE_ENDPOINT_NAME: 'FileEndpoint', + QUEUE_ENDPOINT_NAME: 'QueueEndpoint', + TABLE_ENDPOINT_NAME: 'TableEndpoint', + SHARED_ACCESS_SIGNATURE_NAME: 'SharedAccessSignature', + ENDPOINT_SUFFIX_NAME: 'EndpointSuffix', + BLOB_BASE_DNS_NAME: 'blob.core.windows.net', + FILE_BASE_DNS_NAME: 'file.core.windows.net', + QUEUE_BASE_DNS_NAME: 'queue.core.windows.net', + TABLE_BASE_DNS_NAME: 'table.core.windows.net' + } +}; + +module.exports = Constants; diff --git a/src/node_modules/azure-storage/lib/common/util/date.js b/src/node_modules/azure-storage/lib/common/util/date.js new file mode 100644 index 0000000..8a9bb67 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/date.js @@ -0,0 +1,69 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* Date/time related helper functions +* @module date +* +*/ + +/** +* Generates a Date object which is in the given days from now. +* +* @param {int} days The days timespan. +* @return {Date} +*/ +exports.daysFromNow = function (days) { + var date = new Date(); + date.setDate(date.getDate() + days); + return date; +}; + +/** +* Generates a Date object which is in the given hours from now. +* +* @param {int} hours The hours timespan. 
+* @return {Date} +*/ +exports.hoursFromNow = function (hours) { + var date = new Date(); + date.setHours(date.getHours() + hours); + return date; +}; + +/** +* Generates a Date object which is in the given minutes from now. +* +* @param {int} minutes The minutes timespan. +* @return {Date} +*/ +exports.minutesFromNow = function (minutes) { + var date = new Date(); + date.setMinutes(date.getMinutes() + minutes); + return date; +}; + +/** +* Generates a Date object which is in the given seconds from now. +* +* @param {int} seconds The seconds timespan. +* @return {Date} +*/ +exports.secondsFromNow = function (seconds) { + var date = new Date(); + date.setSeconds(date.getSeconds() + seconds); + return date; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/iso8061date.js b/src/node_modules/azure-storage/lib/common/util/iso8061date.js new file mode 100644 index 0000000..62555aa --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/iso8061date.js @@ -0,0 +1,66 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var rightPad = function (n, number) { + var currentN = '' + n; + while (currentN.length < number) { + currentN = currentN + '0'; + } + + return currentN; +}; + +/** +* Formats a date into an iso 8061 string. +* +* @param {date} date The date to format. 
+* @param {bool} skipMilliseconds Boolean value indicating if the miliseconds part of the date should not be included. +* @param {integer} millisecondsPading Number of digits to left pad the miliseconds. +* @return {string} The date formated in the ISO 8061 date format. +*/ +exports.format = function (date) { + var dateString = date.toISOString(); + return dateString.substring(0, dateString.length - 1) + '0000Z'; +}; + +/** +* Parses an ISO 8061 date string into a date object. +* +* @param {string} stringDateTime The string with the date to parse in the ISO 8061 format. +* @return {date} The parsed date. +*/ +exports.parse = function (stringDateTime) { + var parts = stringDateTime.split('T'); + var ymd = parts[0].split('-'); + var time = parts[1].split('.'); + var hms = time[0].split(':'); + var ms = 0; + if (time[1]) { + ms = time[1].split('Z'); + } + + var date = new Date(Date.UTC( + parseInt(ymd[0], 10), + parseInt(ymd[1], 10) - 1, + parseInt(ymd[2], 10), + parseInt(hms[0], 10), + parseInt(hms[1], 10), + parseInt(hms[2], 10), + Math.round(parseInt(rightPad(ms[0], 7), 10) / 10000) + )); + + return date; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/patch-xmlbuilder.js b/src/node_modules/azure-storage/lib/common/util/patch-xmlbuilder.js new file mode 100644 index 0000000..5a12049 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/patch-xmlbuilder.js @@ -0,0 +1,32 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +'use strict'; + +var XMLStringifier = require('xmlbuilder/lib/XMLStringifier'); + +// Patch xmlbuilder to allow Unicode surrogate pair code +// points in XML bodies + +XMLStringifier.prototype.assertLegalChar = function(str) { + var chars, chr; + chars = /[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE-\uFFFF]/; + chr = str.match(chars); + if (chr) { + throw new Error('Invalid character (' + chr + ') in string: ' + str); + } + return str; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/sr.js b/src/node_modules/azure-storage/lib/common/util/sr.js new file mode 100644 index 0000000..1125b10 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/sr.js @@ -0,0 +1,76 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +exports = module.exports; + +var SR = { + ANONYMOUS_ACCESS_BLOBSERVICE_ONLY: 'Anonymous access is only valid for the BlobService.', + ARGUMENT_NULL_OR_EMPTY: 'The argument must not be null or an empty string. Argument name: %s.', + ARGUMENT_NULL_OR_UNDEFINED: 'The argument must not be null or undefined. Argument name: %s.', + ARGUMENT_OUT_OF_RANGE_ERROR: 'The argument is out of range. 
Argument name: %s, Value passed: %s.', + BATCH_ONE_PARTITION_KEY: 'All entities in the batch must have the same PartitionKey value.', + BATCH_ONE_RETRIEVE: 'If a retrieve operation is part of a batch, it must be the only operation in the batch.', + BATCH_TOO_LARGE: 'Batches must not contain more than 100 operations.', + BLOB_INVALID_SEQUENCE_NUMBER: 'The sequence number may not be specified for an increment operation.', + BLOB_TYPE_MISMATCH: 'Blob type of the blob reference doesn\'t match blob type of the blob.', + CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY: 'Cannot create Shared Access Signature unless the Account Name and Key are used to create the ServiceClient.', + CONTENT_LENGTH_MISMATCH: 'An incorrect number of bytes was read from the connection. The connection may have been closed.', + CONTENT_TYPE_MISSING: 'Content-Type response header is missing or invalid.', + EMPTY_BATCH: 'Batch must not be empty.', + EXCEEDED_SIZE_LIMITATION: 'Upload exceeds the size limitation. Max size is %s but the current size is %s', + HASH_MISMATCH: 'Hash mismatch (integrity check failed), Expected value is %s, retrieved %s.', + INCORRECT_ENTITY_KEYS: 'PartitionKey and RowKey must be specified as strings in the entity object.', + INVALID_BLOB_LENGTH: 'createBlockBlobFromText requires the size of text to be less than 64MB. 
Please use createBlockBlobFromLocalFile or createBlockBlobFromStream to upload large blobs.', + INVALID_CONNECTION_STRING: 'Connection strings must be of the form "key1=value1;key2=value2".', + INVALID_CONNECTION_STRING_BAD_KEY: 'Connection string contains unrecognized key: "%s"', + INVALID_CONNECTION_STRING_DUPLICATE_KEY: 'Connection string contains duplicate key: "%s"', + INVALID_CONNECTION_STRING_EMPTY_KEY: 'Connection strings must not contain empty keys.', + INVALID_DELETE_SNAPSHOT_OPTION: 'The deleteSnapshots option cannot be included when deleting a specific snapshot using the snapshotId option.', + INVALID_EDM_TYPE: 'The value \'%s\' does not match the type \'%s\'.', + INVALID_FILE_LENGTH: 'createFileFromText requires the size of text to be less than 4MB. Please use createFileFromLocalFile or createFileFromStream to upload large files.', + INVALID_FILE_RANGE_FOR_UPDATE: 'Range size should be less than 4MB for a file range update operation.', + INVALID_HEADERS: 'Headers are not supported in the 2012-02-12 version.', + INVALID_MESSAGE_ID: 'Message ID cannot be null or undefined for deleteMessage and updateMessage operations.', + INVALID_PAGE_BLOB_LENGTH: 'Page blob length must be multiple of 512.', + INVALID_PAGE_END_OFFSET: 'Page end offset must be multiple of 512.', + INVALID_PAGE_RANGE_FOR_UPDATE: 'Page range size should be less than 4MB for a page update operation.', + INVALID_PAGE_START_OFFSET: 'Page start offset must be multiple of 512.', + INVALID_POP_RECEIPT: 'Pop Receipt cannot be null or undefined for deleteMessage and updateMessage operations.', + INVALID_PROPERTY_RESOLVER: 'The specified property resolver returned an invalid type. %s:{_:%s,$:%s }', + INVALID_RANGE_FOR_MD5: 'The requested range should be less than 4MB when contentMD5 is expected from the server', + INVALID_SAS_VERSION: 'SAS Version ? is invalid. 
Valid versions include: ?.', + INVALID_SAS_TOKEN: 'The SAS token should not contain api-version.', + INVALID_SIGNED_IDENTIFIERS: 'Signed identifiers need to be a hash object with key as the id and the value as the access policy.', + INVALID_STREAM_LENGTH: 'The length of the provided stream is invalid.', + INVALID_STRING_ERROR: 'Invalid string error.', + INVALID_TABLE_OPERATION: 'Operation not found: %s', + INVALID_TEXT_LENGTH: 'The length of the provided text is invalid.', + MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION: 'The client could not finish the operation within specified maximum execution timeout.', + MD5_NOT_POSSIBLE: 'MD5 cannot be calculated for an existing blob because it would require reading the existing data. Please disable storeBlobContentMD5.', + MD5_NOT_PRESENT_ERROR: 'MD5 does not exist. If you do not want to force validation, please disable useTransactionalMD5.', + METADATA_KEY_INVALID: 'The key for one of the metadata key-value pairs is null, empty, or whitespace.', + METADATA_VALUE_INVALID: 'The value for one of the metadata key-value pairs is null, empty, or whitespace.', + NO_CREDENTIALS_PROVIDED: 'Credentials must be provided when creating a service client.', + PRIMARY_ONLY_COMMAND: 'This operation can only be executed against the primary storage location.', + QUERY_OPERATOR_REQUIRES_WHERE: '%s operator needs to be used after where.', + SECONDARY_ONLY_COMMAND: 'This operation can only be executed against the secondary storage location.', + STORAGE_HOST_LOCATION_REQUIRED: 'The host for the storage service must be specified.', + STORAGE_HOST_MISSING_LOCATION: 'The host for the target storage location is not specified. 
Please consider changing the request\'s location mode.', + TYPE_NOT_SUPPORTED: 'Type not supported when sending data to the service: ', + MAX_BLOB_SIZE_CONDITION_NOT_MEET: 'Append block data should not exceed the maximum blob size condition value.', +}; + +module.exports = SR; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/storageutilities.js b/src/node_modules/azure-storage/lib/common/util/storageutilities.js new file mode 100644 index 0000000..bc5d205 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/storageutilities.js @@ -0,0 +1,42 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'StorageUtilities'. + +/** +* Defines constants, enums, and utility functions for use with storage. +* @namespace +*/ +var StorageUtilities = { + /** + * Specifies the location mode used to decide which location the request should be sent to. 
+ * + * @const + * @enum {number} + */ + LocationMode: { + /** The primary location only */ + PRIMARY_ONLY: 0, + /** The primary location first, then the secondary */ + PRIMARY_THEN_SECONDARY: 1, + /** The secondary location only */ + SECONDARY_ONLY: 2, + /** The secondary location first, then the primary */ + SECONDARY_THEN_PRIMARY: 3 + } +}; + +module.exports = StorageUtilities; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/util.js b/src/node_modules/azure-storage/lib/common/util/util.js new file mode 100644 index 0000000..17ec192 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/util.js @@ -0,0 +1,618 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var _ = require('underscore'); +var util = require('util'); +var url = require('url'); +var stream = require('stream'); +var Constants = require('./constants'); +var Md5Wrapper = require('../md5-wrapper'); +var StorageUtilities = require('./storageutilities'); +var SR = require('./sr'); + +/** +* Trim the default port in the url. +* +* @param {string} uri The URI to be encoded. +* @return {string} The URI without defualt port. 
+*/ +exports.trimPortFromUri = function (uri) { + var uri = url.parse(uri); + if ((uri.protocol === Constants.HTTPS && uri.port == Constants.DEFAULT_HTTPS_PORT) || (uri.protocol === Constants.HTTP && uri.port == Constants.DEFAULT_HTTP_PORT)) { + uri.host = uri.hostname; + } + return url.format(uri); +}; + +/** +* Returns the number of keys (properties) in an object. +* +* @param {object} value The object which keys are to be counted. +* @return {number} The number of keys in the object. +*/ +exports.objectKeysLength = function (value) { + if (!value) { + return 0; + } + + return _.keys(value).length; +}; + +/** +* Checks if in a browser environment. +* +* @return {bool} True if in a browser environment, false otherwise. +*/ +exports.isBrowser = function () { + return typeof window !== 'undefined'; +}; + +/** +* Checks if in IE. +* +* @return {bool} True if in IE, false otherwise. +*/ +exports.isIE = function () { + if (!exports.isBrowser()) { + return false; + } + + var ua = window.navigator.userAgent; + var msie = ua.indexOf('MSIE '); + var trident = ua.indexOf('Trident/'); + return msie > 0 || trident > 0; +}; + +/** +* Checks if in a 32bit Node.js environment. +* +* @return {bool} True if in a 32bit Node.js environment, false otherwise. +*/ +exports.is32 = function () { + return !exports.isBrowser() && process.arch === 'ia32'; +}; + +/** +* Checks if a value is null or undefined. +* +* @param {object} value The value to check for null or undefined. +* @return {bool} True if the value is null or undefined, false otherwise. +*/ +exports.objectIsNull = function (value) { + return _.isNull(value) || _.isUndefined(value); +}; + +/** +* Checks if an object is empty. +* +* @param {object} object The object to check if it is null. +* @return {bool} True if the object is empty, false otherwise. +*/ +exports.objectIsEmpty = function (object) { + return _.isEmpty(object); +}; + +/** +* Determines if an object contains an integer number. 
+* +* @param {object} value The object to assert. +* @return {bool} True if the object contains an integer number; false otherwise. +*/ +exports.objectIsInt = function (value) { + return typeof value === 'number' && parseFloat(value) == parseInt(value, 10) && !isNaN(value); +}; + +/** +* Determines if an object is a NaN. +* +* @param {object} value The object to assert. +* @return {bool} True if the object is a NaN; false otherwise. +*/ +exports.objectIsNaN = function (value) { + return typeof(value) === 'number' && isNaN(value); +}; + +/** +* Checks if an object is a string. +* +* @param {object} object The object to check if it is a string. +* @return {bool} True if the object is a string, false otherwise. +*/ +exports.objectIsString = function (object) { + return _.isString(object); +}; + +/** +* Check if an object is a function +* @param {object} object The object to check whether it is function +* @return {bool} True if the specified object is function, otherwise false +*/ +exports.objectIsFunction = function (object) { + return _.isFunction(object); +}; + + +/** +* Front zero padding of string to sepcified length +*/ +exports.zeroPaddingString = function(str, len) { + var paddingStr = '0000000000' + str; + if(paddingStr.length < len) { + return exports.zeroPaddingString(paddingStr, len); + } else { + return paddingStr.substr(-1 * len); + } +}; + +/** +* Checks if a value is an empty string, null or undefined. +* +* @param {object} value The value to check for an empty string, null or undefined. +* @return {bool} True if the value is an empty string, null or undefined, false otherwise. +*/ +exports.stringIsEmpty = function (value) { + return _.isNull(value) || _.isUndefined(value) || value === ''; +}; + +/** +* Checks if a value is null, empty, undefined or consists only of white-space characters. +* +* @param {object} value The value to check for null, empty, undefined and white-space only characters. 
+* @return {bool} True if the value is an empty string, null, undefined, or consists only of white-space characters, false otherwise. +*/ +exports.IsNullOrEmptyOrUndefinedOrWhiteSpace = function (value) { + if(_.isNull(value) || _.isUndefined(value) || value === '') { + return true; + } + + if(_.isString(value) && value.trim().length === 0) { + return true; + } + + return false; +}; + +/** +* Formats a text replacing '?' by the arguments. +* +* @param {string} text The string where the ? should be replaced. +* @param {array} arguments Value(s) to insert in question mark (?) parameters. +* @return {string} +*/ +exports.stringFormat = function (text) { + if (arguments.length > 1) { + for (var i = 1; text.indexOf('?') !== -1; i++) { + text = text.replace('?', arguments[i]); + } + } + + return text; +}; + +/** +* Determines if a string starts with another. +* +* @param {string} text The string to assert. +* @param {string} prefix The string prefix. +* @return {Bool} True if the string starts with the prefix; false otherwise. +*/ +exports.stringStartsWith = function (text, prefix) { + if (_.isNull(prefix)) { + return true; + } + + return text.substr(0, prefix.length) === prefix; +}; + +/** +* Determines if a string ends with another. +* +* @param {string} text The string to assert. +* @param {string} suffix The string suffix. +* @return {Bool} True if the string ends with the suffix; false otherwise. +*/ +exports.stringEndsWith = function (text, suffix) { + if (_.isNull(suffix)) { + return true; + } + + return text.substr(text.length - suffix.length) === suffix; +}; + +/** +* Removes the BOM from a string. +* +* @param {string} str The string from where the BOM is to be removed +* @return {string} The string without the BOM. +*/ +exports.removeBOM = function (str) { + if (str.charCodeAt(0) === 0xfeff || str.charCodeAt(0) === 0xffef) { + str = str.substring(1); + } + + return str; +}; + +/** +* Merges multiple objects. 
+* +* @param {object} object The objects to be merged +* @return {object} The merged object. +*/ +exports.merge = function () { + return _.extend.apply(this, arguments); +}; + +/** +* Checks if a value exists in an array. The comparison is done in a case +* insensitive manner. +* +* @param {string} needle The searched value. +* @param {array} haystack The array. +* +* @static +* +* @return {boolean} +*/ +exports.inArrayInsensitive = function (needle, haystack) { + return _.contains(_.map(haystack, function (h) { return h.toLowerCase(); }), needle.toLowerCase()); +}; + +/** +* Returns the specified value of the key passed from object and in case that +* this key doesn't exist, the default value is returned. The key matching is +* done in a case insensitive manner. +* +* @param {string} key The array key. +* @param {object} haystack The object to be used. +* @param {mix} default The value to return if $key is not found in $array. +* +* @static +* +* @return mix +*/ +exports.tryGetValueInsensitive = function (key, haystack, defaultValue) { + if (haystack) { + for (var i in haystack) { + if (haystack.hasOwnProperty(i) && i.toString().toLowerCase() === key.toString().toLowerCase()) { + return haystack[i]; + } + } + } + + return defaultValue; +}; + +/** +* Returns the value in a chained object. +* +* @param {object} object The object with the values. +* @param {array} keys The keys. +* @param {mix} default The value to return if $key is not found in $array. +* +* @static +* +* @return mix +*/ +exports.tryGetValueChain = function (object, keys, defaultValue) { + if (keys.length === 0) { + return object; + } + + var currentKey = keys.shift(); + if (object && object[currentKey] !== undefined) { + return exports.tryGetValueChain(object[currentKey], keys, defaultValue); + } + + return defaultValue; +}; + +/** +* Set the value of an inner property of an object. +* +* @param {object} object The target object. +* @param {array} keys The property chain keys. 
+* @param {mix} object The value to be set. +* +* @static + +* @example +* // Set targetObject.propA.propB to 'testValue' +* var targetObject = {}; +* util.setObjectInnerPropertyValue(targetObject, ['propA', 'propB'], 'testValue'); +*/ +exports.setObjectInnerPropertyValue = function(object, propertyChainKeys, value){ + if(!object || propertyChainKeys.length < 1) { + return; + } + + var currentKey = propertyChainKeys.shift(); + if(propertyChainKeys.length === 0) { + object[currentKey] = value; + return; + } + + if (!object[currentKey]) { + object[currentKey] = {}; + } + + exports.setObjectInnerPropertyValue(object[currentKey], propertyChainKeys, value); +}; + +/** +* Rounds a date off to seconds. +* +* @param {Date} a date +* @return {string} the date in ISO8061 format, with no milliseconds component +*/ +exports.truncatedISO8061Date = function (date) { + var dateString = date.toISOString(); + return dateString.substring(0, dateString.length - 5) + 'Z'; +}; + +exports.normalizeArgs = function (optionsOrCallback, callback, result) { + var options = {}; + if(_.isFunction(optionsOrCallback) && !callback) { + callback = optionsOrCallback; + } else if (optionsOrCallback) { + options = optionsOrCallback; + } + + result(options, callback); +}; + +exports.getNodeVersion = function () { + var parsedVersion = process.version.split('.'); + return { + major: parseInt(parsedVersion[0].substr(1), 10), + minor: parseInt(parsedVersion[1], 10), + patch: parseInt(parsedVersion[2], 10) + }; +}; + +/** +* Calculate md5sum for the stream +* @ignore +*/ +exports.calculateMD5 = function(readStream, bufferLength, options, callback) { + var internalBuff = Buffer.alloc(bufferLength); + var index = 0; + var internalHash = new Md5Wrapper().createMd5Hash(); + readStream.on('data', function(data) { + if (index + data.length > bufferLength) { + var copyLength = bufferLength - index; + if (copyLength > 0) { + data = data.slice(0, copyLength); + data.copy(internalBuff, index); + 
internalHash.update(data); + index += copyLength; + } + readStream.emit('end'); + } else { + data.copy(internalBuff, index); + internalHash.update(data); + index += data.length; + } + }).on('end', function() { + if (!readStream.endEmitted) { + internalBuff = internalBuff.slice(0, index); + var contentMD5 = internalHash.digest('base64'); + // Set the flag to be compatible with Nodejs 0.10 which will keep emitting data from + // the file stream when the read stream has emitted the end event from its listner. + readStream.endEmitted = true; + callback(internalBuff, contentMD5); + } + }); +}; + +/** +* Whether the content of buffer is all zero +*/ +exports.isBufferAllZero = function (buffer) { + for(var i = 0, len = buffer.length; i < len; i++) { + if (buffer[i] !== 0) { + return false; + } + } + return true; +}; + +/** +* Write zero to stream +*/ +var zeroBuffer = null; +exports.writeZerosToStream = function (stream, length, md5Hash, progressCallback, callback) { + var defaultBufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + var bufferSize = Math.min(defaultBufferSize, length); + var remaining = length - bufferSize; + var buffer = null; + if (bufferSize == defaultBufferSize) { + if (!zeroBuffer) { + zeroBuffer = Buffer.alloc(defaultBufferSize); + zeroBuffer.fill(0); + } + buffer = zeroBuffer; + } else { + buffer = Buffer.alloc(bufferSize); + buffer.fill(0); + } + if (md5Hash) { + md5Hash.update(buffer); + } + //We can only write the entire buffer to stream instead of part of buffer. 
+ return stream.write(buffer, function () { + if (exports.objectIsFunction(progressCallback)) { + progressCallback(null, buffer.length); + } + buffer = null; + if (remaining > 0) { + exports.writeZerosToStream(stream, remaining, md5Hash, progressCallback, callback); + } else if (exports.objectIsFunction(callback)) { + callback(null, null); + } + }); +}; + +/** +* Calculate md5sum for the content +*/ +exports.getContentMd5 = function (content, encoding) { + if (!encoding) encoding = 'base64'; + var internalHash = new Md5Wrapper().createMd5Hash(); + internalHash.update(content, 'utf8'); + return internalHash.digest(encoding); +}; + +exports.getNextLocation = function(lastLocation, locationMode) { + switch(locationMode) { + case StorageUtilities.LocationMode.PRIMARY_ONLY: + return Constants.StorageLocation.PRIMARY; + case StorageUtilities.LocationMode.SECONDARY_ONLY: + return Constants.StorageLocation.SECONDARY; + case StorageUtilities.LocationMode.PRIMARY_THEN_SECONDARY: + case StorageUtilities.LocationMode.SECONDARY_THEN_PRIMARY: + return (lastLocation === Constants.StorageLocation.PRIMARY) ? 
Constants.StorageLocation.SECONDARY : Constants.StorageLocation.PRIMARY; + default: + throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'locationMode', locationMode)); + } +}; + +exports.getNextListingLocationMode = function (token) { + if(_.isNull(token) || _.isUndefined(token)) { + return Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + } + else { + switch (token.targetLocation) { + case Constants.StorageLocation.PRIMARY: + return Constants.RequestLocationMode.PRIMARY_ONLY; + case Constants.StorageLocation.SECONDARY: + return Constants.RequestLocationMode.SECONDARY_ONLY; + default: + throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'targetLocation', token.targetLocation)); + } + } +}; + +exports.isStreamPaused = function (object) { + if (object instanceof stream) { + return object._paused === true || (object._readableState && object._readableState.flowing === false); + } + return false; +}; + +/** +* Parse copy progress string in the format of bytesCopied/totalBytes +*/ +exports.parseCopyProgress = function (progress) { + if (typeof progress != 'string' || progress.indexOf('/') === -1) { + return {}; + } + + var progressInfo = progress.split('/'); + return { bytesCopied: progressInfo[0], totalBytes: progressInfo[1] }; +}; + +/** +* The list of the properties should be normalized with explicit mapping +*/ +var normalizePropertyNameExceptionList = { + 'x-ms-blob-sequence-number': 'sequenceNumber', + 'content-Type': 'contentSettings.contentType', + 'content-Encoding': 'contentSettings.contentEncoding', + 'content-Language': 'contentSettings.contentLanguage', + 'cache-Control': 'contentSettings.cacheControl', + 'content-Disposition': 'contentSettings.contentDisposition', + 'content-MD5': 'contentSettings.contentMD5', + 'leaseId': 'lease.id', + 'leaseStatus': 'lease.status', + 'leaseDuration': 'lease.duration', + 'leaseState': 'lease.state', + 'copyId': 'copy.id', + 'copyStatus': 'copy.status', + 'copySource': 'copy.source', + 
'copyProgress': 'copy.progress', + 'copyCompletionTime': 'copy.completionTime', + 'copyStatusDescription': 'copy.statusDescription', + 'copyDestinationSnapshot': 'copy.destinationSnapshot', + 'publicAccess': 'publicAccessLevel', + 'incrementalCopy': 'isIncrementalCopy' +}; + +/** +* Normalize the property name from XML to keep consistent with +* the name defined in the property headers +*/ +exports.normalizePropertyNameFromXML = function (propertyName) { + if (this.IsNullOrEmptyOrUndefinedOrWhiteSpace(propertyName)) { + return ''; + } + + propertyName = propertyName.trim(); + propertyName = propertyName[0].toLowerCase() + propertyName.substring(1); + // So far the cases are: + // for the 'last-modified' property in listing resources + // for the 'content-*' properties in listing resources + // for the 'cache-control' property in listing blobs + // for the 'x-ms-blob-sequence-number' in listing blobs + if (propertyName in normalizePropertyNameExceptionList) { + return normalizePropertyNameExceptionList[propertyName]; + } else if (propertyName.toLowerCase().indexOf('-') != -1) { + return propertyName.replace('-', ''); + } else { + return propertyName; + } +}; + +/** +* Set the property value from XML +*/ +exports.setPropertyValueFromXML = function (result, xmlNode, toNormalize) { + for (var subPropertyName in xmlNode) { + if (xmlNode.hasOwnProperty(subPropertyName)) { + if (toNormalize) { + var propertyChain = this.normalizePropertyNameFromXML(subPropertyName).split('.'); + exports.setObjectInnerPropertyValue(result, propertyChain, xmlNode[subPropertyName]); + } else { + result[subPropertyName.toLowerCase()] = xmlNode[subPropertyName]; + } + + if (subPropertyName.toLowerCase() === 'copyprogress') { + var info = this.parseCopyProgress(xmlNode[subPropertyName]); + exports.setObjectInnerPropertyValue(result, ['copy', 'bytesCopied'], parseInt(info.bytesCopied)); + exports.setObjectInnerPropertyValue(result, ['copy', 'totalBytes'], parseInt(info.totalBytes)); + } + } + } 
+}; + +/** + * Filter out non-reserved properties from options + */ +exports.filterOutNonReservedProperties = function (reserved, options) { + var nonReservedProperties = {}; + if (options) { + for (var prop in options) { + if (options.hasOwnProperty(prop)) { + var isReserved = reserved.hasOwnProperty(prop); + var isFunction = typeof options[prop] === 'function'; + if (!isReserved && !isFunction) { + nonReservedProperties[prop] = options[prop]; + } + } + } + } + return nonReservedProperties; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/common/util/validate.js b/src/node_modules/azure-storage/lib/common/util/validate.js new file mode 100644 index 0000000..25a0506 --- /dev/null +++ b/src/node_modules/azure-storage/lib/common/util/validate.js @@ -0,0 +1,560 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var _ = require('underscore'); +var util = require('util'); + +var constants = require('./../util/constants'); +var blobConstants = constants.BlobConstants; +var BlobUtilities = require('./../../services/blob/blobutilities'); +var FileUtilities = require('./../../services/file/fileutilities'); +var azureutil = require('./util'); +var SR = require('./sr'); +var check = require('validator'); +var errors = require('../errors/errors'); +var ArgumentError = errors.ArgumentError; +var ArgumentNullError = errors.ArgumentNullError; + +exports = module.exports; + +function initCallback(callbackParam, resultsCb) { + var fail; + if (callbackParam) { + fail = function (err) { + callbackParam(err); + return false; + }; + } else { + fail = function (err) { + throw err; + }; + callbackParam = function () {}; + } + + resultsCb(fail, callbackParam); +} + +/** +* Checks if the given value is a valid enumeration or not. +* +* @param {object} value The value to validate. +* @param {object} list The enumeration values. +* @return {boolean} +*/ +exports.isValidEnumValue = function (value, list, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!list.some(function (current) { + return current.toLowerCase() === value.toLowerCase(); + })) { + return fail(new RangeError(util.format('Invalid value: %s. Options are: %s.', value, list))); + } + + callback(); + return true; +}; + +/** +* Creates a anonymous function that check if the given uri is valid or not. +* +* @param {string} uri The uri to validate. +* @return {boolean} +*/ +exports.isValidUri = function (uri) { + if (!check.isURL(uri, { 'require_tld': false })){ + throw new URIError('The provided URI "' + uri + '" is invalid.'); + } + return true; +}; + +/** +* Checks if the given host is valid or not. +* +* @param {string|object} host The host to validate. 
+* @return {boolean} +*/ +exports.isValidHost= function (host) { + if (azureutil.objectIsNull(host)) { + throw new ArgumentNullError('host', SR.STORAGE_HOST_LOCATION_REQUIRED); + } else { + var storageHost = {}; + storageHost.primaryHost = _.isString(host) ? host : host.primaryHost; + if (storageHost.primaryHost && !check.isURL(storageHost.primaryHost, { 'require_tld': false })){ + throw new URIError('The provided URI "' + storageHost.primaryHost + '" is invalid.'); + } + + storageHost.secondaryHost = _.isString(host) ? undefined : host.secondaryHost; + if (storageHost.secondaryHost && !check.isURL(storageHost.secondaryHost, { 'require_tld': false })){ + throw new URIError('The provided URI "' + storageHost.secondaryHost + '" is invalid.'); + } + + if (!storageHost.primaryHost && !storageHost.secondaryHost) { + throw new ArgumentNullError('host', SR.STORAGE_HOST_LOCATION_REQUIRED); + } + } + + return true; +}; + +/** +* Checks if the given value is a valid UUID or not. +* +* @param {string|object} uuid The uuid to validate. +* @return {boolean} +*/ +exports.isValidUuid = function(uuid, callback) { + var validUuidRegex = /^[a-zA-Z0-9]{8}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{12}$/; + + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!validUuidRegex.test(uuid)) { + return fail(new SyntaxError('The value is not a valid UUID format.')); + } + + callback(); + return true; +}; + +/** +* Creates a anonymous function that check if a given key is base 64 encoded. +* +* @param {string} key The key to validate. +* @return {function} +*/ +exports.isBase64Encoded = function (key) { + var isValidBase64String = key.match(/^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$/); + + if (isValidBase64String) { + return true; + } else { + throw new SyntaxError('The provided account key ' + key + ' is not a valid base64 string.'); + } +}; + +/** +* Validates a function. 
+* +* @param {object} function The function to validate. +* @return {function} +*/ +exports.isValidFunction = function (functionObject, functionName) { + if (!functionObject) { + throw new ArgumentNullError('functionObject', functionName + ' must be specified.'); + } + if(!_.isFunction(functionObject)){ + throw new TypeError(functionName + ' specified should be a function.'); + } + return true; +}; + +var getNameError = function(name, typeName) { + // checks if name is null, undefined or empty + if (azureutil.stringIsEmpty(name)) { + return new ArgumentNullError('name', util.format('%s name must be a non empty string.', typeName)); + } + + // check if name is between 3 and 63 characters + if (name.length < 3 || name.length > 63) { + return new ArgumentError('name', util.format('%s name must be between 3 and 63 characters long.', typeName)); + } + + // check if name follows naming rules + if (name.match(/^([a-z0-9]+(-[a-z0-9]+)*)$/) === null) { + return new SyntaxError(util.format('%s name format is incorrect.', typeName)); + } + + return null; +}; + +/** +* Validates a container name. +* +* @param {string} containerName The container name. +*/ +exports.containerNameIsValid = function (containerName, callback) { + var fail; + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + var nameError = getNameError(containerName, 'Container'); + + if (!nameError || containerName.match(/^(\$root|\$logs|\$web)$/)) { + callback(); + return true; + } else { + return fail(nameError); + } +}; + +/** +* Validates a blob name. +* +* @param {string} containerName The container name. +* @param {string} blobname The blob name. 
+*/ +exports.blobNameIsValid = function (containerName, blobName, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!blobName) { + return fail(new ArgumentNullError('blobName', 'Blob name is not specified.')); + } + + if (containerName === '$root' && blobName.indexOf('/') !== -1) { + return fail(new SyntaxError('Blob name format is incorrect.')); + } + + callback(); + return true; +}; + +/** +* Validates a blob tier name. +* +* @param {string} blobTier The blob tier name. +*/ +exports.blobTierNameIsValid = function (blobTier, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!blobTier) { + return fail(new ArgumentNullError('blobTier', 'Blob tier is not specified.')); + } + + if (!_.chain(_.union( + _.values(BlobUtilities.BlobTier.PremiumPageBlobTier), + _.values(BlobUtilities.BlobTier.StandardBlobTier) + )) + .map(function (val) { return val.toString().toUpperCase(); }) + .contains(blobTier.toString().toUpperCase()) + .value()) { + return fail(new SyntaxError('Blob tier is incorrect. Refer to BlobUtilities.BlobTier for possible values.')); + } + + callback(); + return true; +}; + +/** +* Validates a share name. +* +* @param {string} shareName The share name. +*/ +exports.shareNameIsValid = function (shareName, callback) { + var fail; + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + var nameError = getNameError(shareName, 'Share'); + + if (!nameError) { + callback(); + return true; + } else { + return fail(nameError); + } +}; + +/** +* Validates a queue name. +* +* @param {string} queueName The queue name. 
+*/ +exports.queueNameIsValid = function (queueName, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + var nameError = getNameError(queueName, 'Queue'); + + if (!nameError) { + callback(); + return true; + } else { + return fail(nameError); + } +}; + +/** +* Validates a table name. +* +* @param {string} table The table name. +*/ +exports.tableNameIsValid = function (table, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (azureutil.stringIsEmpty(table)) { + return fail(new ArgumentNullError('table', 'Table name must be a non empty string.')); + } + + if (table.length < 3 || table.length > 63) { + return fail(new ArgumentError('table', 'Table name must be between 3 and 63 characters long.')); + } + + if(table.toLowerCase() === 'tables') { + return fail(new RangeError('Table name cannot be \'Tables\'.')); + } + + if (table.match(/^([A-Za-z][A-Za-z0-9]{2,62})$/) !== null || table === '$MetricsCapacityBlob' || table.match(/^(\$Metrics(HourPrimary|MinutePrimary|HourSecondary|MinuteSecondary)?(Transactions)(Blob|Queue|Table|File))$/) !== null) + { + callback(); + return true; + } else { + return fail(new SyntaxError('Table name format is incorrect.')); + } +}; + +/** +* Validates an HTML File object. +* +* @param {File} browserFile The HTML File object. 
+*/ +exports.browserFileIsValid = function (browserFile, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + // IE doesn't support File.constructor.name + if (!azureutil.isBrowser() || + !browserFile || + !browserFile.constructor || + (!azureutil.isIE() && !browserFile.constructor.name) || + (!azureutil.isIE() && browserFile.constructor.name !== 'File' && browserFile.constructor.name !== 'Blob') || + !azureutil.objectIsInt(browserFile.size)) { + return fail(new ArgumentError('type', 'Invalid HTML File object.')); + } else { + callback(); + return true; + } +}; + +/** +* Validates page ranges. +* +* @param {int} rangeStart The range starting position. +* @param {int} rangeEnd The range ending position. +* @param {int} writeBlockSizeInBytes The block size. +*/ +exports.pageRangesAreValid = function (rangeStart, rangeEnd, writeBlockSizeInBytes, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (rangeStart % 512 !== 0) { + return fail(new RangeError('Start byte offset must be a multiple of 512.')); + } + + var size = null; + if (!azureutil.objectIsNull(rangeEnd)) { + if ((rangeEnd + 1) % 512 !== 0) { + return fail(new RangeError('End byte offset must be a multiple of 512 minus 1.')); + } + + size = (rangeEnd - rangeStart) + 1; + if (size > writeBlockSizeInBytes) { + return fail(new RangeError('Page blob size cannot be larger than ' + writeBlockSizeInBytes + ' bytes.')); + } + } + + callback(); + return true; +}; + +/** +* Validates a blob type. +* +* @param {string} type The type name. +*/ +exports.blobTypeIsValid = function (type, callback) { + var getEnumValues = function (obj) { + var values = []; + for (var prop in obj) { + if (obj.hasOwnProperty(prop)) { + values.push(obj[prop]); + } + } + return values; + }; + + return this.isValidEnumValue(type, getEnumValues(blobConstants.BlobTypes), callback); +}; + +/** +* Validates share ACL type. 
+* +* @param {string} type The type name. +*/ +exports.shareACLIsValid = function (type, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (type != FileUtilities.SharePublicAccessType.OFF) { + fail(new ArgumentError('type', 'The access type is not supported.')); + } + + callback(); + return true; +}; + +/** +* Validates share quota value. +* +* @param {int} type The quota value. +*/ +exports.shareQuotaIsValid = function (quota, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (quota && quota <= 0) { + fail(new RangeError('The share quota value, in GB, must be greater than 0.')); + } + + callback(); + return true; +}; + +// common functions for validating arguments + +function throwMissingArgument(name, func) { + throw new ArgumentNullError(name, 'Required argument ' + name + ' for function ' + func + ' is not defined'); +} + +function ArgumentValidator(functionName) { + this.func = functionName; +} + +_.extend(ArgumentValidator.prototype, { + string: function (val, name) { + this.exists(val, name); + if (typeof val !== 'string') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a non-empty string'); + } + }, + + stringAllowEmpty: function (val, name) { + if (typeof val !== 'string') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a string'); + } + }, + + object: function (val, name) { + this.exists(val, name); + if (typeof val !== 'object') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be an object'); + } + }, + + exists: function (val, name) { + if (!val) { + throwMissingArgument(name, this.func); + } + }, + + function: function (val, name) { + this.exists(val, name); + if (typeof val !== 'function') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a function'); + } + }, + + value: 
function (val, name) { + if (!val && val !== 0) { + throwMissingArgument(name, this.func); + } + }, + + nonEmptyArray: function (val, name) { + if (!val || val.length === 0) { + throw new TypeError('Required array argument ' + name + ' for function ' + this.func + ' is either not defined or empty'); + } + }, + + callback: function (val) { + this.exists(val, 'callback'); + this.function(val, 'callback'); + }, + + test: function (predicate, message) { + if (!predicate()) { + throw new Error(message + ' in function ' + this.func); + } + }, + + tableNameIsValid: exports.tableNameIsValid, + browserFileIsValid: exports.browserFileIsValid, + containerNameIsValid: exports.containerNameIsValid, + shareNameIsValid: exports.shareNameIsValid, + blobNameIsValid: exports.blobNameIsValid, + blobTierNameIsValid: exports.blobTierNameIsValid, + pageRangesAreValid: exports.pageRangesAreValid, + queueNameIsValid: exports.queueNameIsValid, + blobTypeIsValid: exports.blobTypeIsValid, + shareACLIsValid: exports.shareACLIsValid, + shareQuotaIsValid: exports.shareQuotaIsValid, + isValidEnumValue: exports.isValidEnumValue +}); + +function validateArgs(functionName, validationRules) { + var validator = new ArgumentValidator(functionName); + validationRules(validator); +} + +exports.ArgumentValidator = ArgumentValidator; +exports.validateArgs = validateArgs; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/blob/blobservice.browser.js b/src/node_modules/azure-storage/lib/services/blob/blobservice.browser.js new file mode 100644 index 0000000..0b50d66 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/blobservice.browser.js @@ -0,0 +1,319 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var BlobService = require('./blobservice.core'); +var azureCommon = require('./../../common/common.browser'); +var extend = require('extend'); +var mime = require('browserify-mime'); + +var Constants = azureCommon.Constants; +var azureutil = azureCommon.util; +var BlobConstants = Constants.BlobConstants; +var BrowserFileReadStream = azureCommon.BrowserFileReadStream; +var SpeedSummary = azureCommon.SpeedSummary; +var validate = azureCommon.validate; + +/** +* Creates a new block blob. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Only available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {File} browserFile The File object to be uploaded created by HTML File API. +* @param {object} [options] The request options. +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createBlockBlobFromBrowserFile = function (container, blob, browserFile, optionsOrCallback, callback) { + return this._createBlobFromBrowserFile(container, blob, BlobConstants.BlobTypes.BLOCK, browserFile, optionsOrCallback, callback); +}; + +/** +* Uploads a page blob from an HTML file. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Only available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {File} browserFile The File object to be uploaded created by HTML File API. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createPageBlobFromBrowserFile = function (container, blob, browserFile, optionsOrCallback, callback) { + return this._createBlobFromBrowserFile(container, blob, BlobConstants.BlobTypes.PAGE, browserFile, optionsOrCallback, callback); +}; + +/** +* Creates a new append blob from an HTML File object. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If you want to append data to an already existing blob, please look at appendFromBrowserFile. +* (Only available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {File} browserFile The File object to be uploaded created by HTML File API. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. 
+* @param {string} [options.leaseId] The lease identifier.
+* @param {object} [options.metadata] The metadata key/value pairs.
+* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+* @param {object} [options.contentSettings] The content settings of the blob.
+* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
+* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
+* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request.
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createAppendBlobFromBrowserFile = function (container, blob, browserFile, optionsOrCallback, callback) { + return this._createBlobFromBrowserFile(container, blob, BlobConstants.BlobTypes.APPEND, browserFile, optionsOrCallback, callback); +}; + +/** +* Appends to an append blob from an HTML File object. Assumes the blob already exists on the service. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* (Only available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {File} browserFile The File object to be uploaded created by HTML File API. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.appendFromBrowserFile = function (container, blob, browserFile, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendFromBrowserFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.browserFileIsValid(browserFile); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var stream = new BrowserFileReadStream(browserFile); + var streamCallback = function (appendError, blob, response) { + if (azureutil.objectIsFunction(stream.destroy)) { + stream.destroy(); + } + callback(appendError, blob, response); + }; + this._uploadBlobFromStream(false, container, blob, BlobConstants.BlobTypes.APPEND, stream, browserFile.size, options, streamCallback); + + return options.speedSummary; +}; + +// Private methods + +/** +* Creates a new blob (Block/Page/Append). If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Only available in the JavaScript Client Library for Browsers) +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {File} browserFile The File object to be uploaded created by HTML File API. +* @param {object} [options] The request options. 
+* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the blob content. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* +* @return {SpeedSummary} +* +*/ +BlobService.prototype._createBlobFromBrowserFile = function (container, blob, blobType, browserFile, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromBrowserFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.browserFileIsValid(browserFile); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var self = this; + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + // Automatically detect the mime type + if(azureutil.tryGetValueChain(options, ['contentSettings','contentType'], undefined) === undefined) { + 
azureutil.setObjectInnerPropertyValue(options, ['contentSettings','contentType'], mime.lookup(browserFile.name)); + } + + var stream = new BrowserFileReadStream(browserFile); + var streamCallback = function (createError, createBlob, createResponse) { + if (azureutil.objectIsFunction(stream.destroy)) { + stream.destroy(); + } + callback(createError, createBlob, createResponse); + }; + self._uploadBlobFromStream(true, container, blob, blobType, stream, browserFile.size, options, streamCallback); + } + }; + + this._createBlob(container, blob, blobType, browserFile.size, options, creationCallback); + + return options.speedSummary; +}; + +module.exports = BlobService; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/blob/blobservice.core.js b/src/node_modules/azure-storage/lib/services/blob/blobservice.core.js new file mode 100644 index 0000000..adaac07 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/blobservice.core.js @@ -0,0 +1,5871 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var qs = require('querystring'); +var url = require('url'); +var util = require('util'); +var _ = require('underscore'); +var extend = require('extend'); + +var azureCommon = require('./../../common/common.core'); +var BlockRangeStream = require('./internal/blockrangestream'); +var Md5Wrapper = require('./../../common/md5-wrapper'); +var PageRangeStream = require('./internal/pagerangestream'); +var RangeStream = require('./../../common/streams/rangestream'); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var validate = azureCommon.validate; +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; + +// Constants +var Constants = azureCommon.Constants; +var BlobConstants = Constants.BlobConstants; +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; +var RequestLocationMode = Constants.RequestLocationMode; + +// Streams +var BatchOperation = azureCommon.BatchOperation; +var SpeedSummary = azureCommon.SpeedSummary; +var ChunkAllocator = azureCommon.ChunkAllocator; +var ChunkStream = azureCommon.ChunkStream; +var ChunkStreamWithStream = azureCommon.ChunkStreamWithStream; + +// Models requires +var AclResult = azureCommon.AclResult; +var ServiceStatsParser = azureCommon.ServiceStatsParser; +var AccountPropertiesResult = require('../../common/models/accountpropertiesresult'); +var BlockListResult = require('./models/blocklistresult'); +var BlobResult = require('./models/blobresult'); +var ContainerResult = require('./models/containerresult'); +var LeaseResult = require('./models/leaseresult'); + +var BlobUtilities = require('./blobutilities'); + +// Errors requires +var errors = require('../../common/errors/errors'); +var ArgumentError = errors.ArgumentError; +var ArgumentNullError = errors.ArgumentNullError; +var StorageError = errors.StorageError; + +/** +* Creates a new BlobService object. 
/**
* Creates a new BlobService object, used to perform operations on the
* Microsoft Azure Blob Service (binary large objects, exposed as streams,
* blocks or pages).
*
* If no connection string or storage account/access key pair is supplied,
* the AZURE_STORAGE_CONNECTION_STRING or the AZURE_STORAGE_ACCOUNT and
* AZURE_STORAGE_ACCESS_KEY environment variables are used instead.
*
* Tunable defaults on each instance: singleBlobPutThresholdInBytes,
* defaultEnableReuseSocket, defaultTimeoutIntervalInMs,
* defaultClientRequestTimeoutInMs, defaultMaximumExecutionTimeInMs,
* defaultLocationMode, parallelOperationThreadCount, useNagleAlgorithm
* and enableGlobalHttpAgent.
*
* @constructor
* @extends {StorageServiceClient}
*
* @param {string}          [storageAccountOrConnectionString] Storage account name or full connection string.
* @param {string}          [storageAccessKey]                 Storage access key.
* @param {string|object}   [host]                             Host address; a string sets the primary host only,
*                                                             an object may carry primaryHost/secondaryHost.
* @param {string}          [sas]                              Shared Access Signature string.
* @param {string}          [endpointSuffix]                   Endpoint suffix.
* @param {TokenCredential} [token]                            The {@link TokenCredential} object.
*/
function BlobService(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) {
  var settings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token);

  BlobService['super_'].call(this,
    settings._name,
    settings._key,
    settings._blobEndpoint,
    settings._usePathStyleUri,
    settings._sasToken,
    token);

  // Instance-level upload defaults; callers may override these after construction.
  this.defaultEnableReuseSocket = Constants.DEFAULT_ENABLE_REUSE_SOCKET;
  this.singleBlobPutThresholdInBytes = BlobConstants.DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES;
  this.parallelOperationThreadCount = Constants.DEFAULT_PARALLEL_OPERATION_THREAD_COUNT;
}

util.inherits(BlobService, StorageServiceClient);

// Non-class methods
+*/ +function createResourceName(containerName, blobName, forSAS) { + // Resource name + if (blobName && !forSAS) { + blobName = encodeURIComponent(blobName); + blobName = blobName.replace(/%2F/g, '/'); + blobName = blobName.replace(/%5C/g, '/'); + blobName = blobName.replace(/\+/g, '%20'); + } + + // return URI encoded resource name + if (blobName) { + return containerName + '/' + blobName; + } + else { + return containerName; + } +} + +// Blob service methods + +/** +* Gets the service stats for a storage account’s Blob service. +* +* @this {BlobService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceStats}` will contain the stats and +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.getServiceStats = function (optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('getServiceStats', function (v) { + v.callback(callback); + }); + + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'stats') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.serviceStatsResult = null; + if (!responseObject.error) { + responseObject.serviceStatsResult = ServiceStatsParser.parse(responseObject.response.body.StorageServiceStats); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.serviceStatsResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the properties of a storage account’s Blob service, including Azure Storage Analytics. +* +* @this {BlobService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link BlobServiceProperties}` will contain the properties +* and `response` will contain information related to this operation. +*/ +BlobService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Gets the properties of a storage account. +* +* @this {BlobService} +* @param {string} [container] Optional. Name of an existing container. Required when using a SAS token to a specific container or blob. +* @param {string} [blob] Optional. Name of an existing blob. Required when using a SAS token to a specific blob. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
/**
* Gets the properties (SKU name and account kind) of a storage account.
*
* `container`/`blob` are optional and are only needed when authenticating
* with a SAS token scoped to a specific container or blob — the HEAD request
* is then issued against that resource with ?restype=account&comp=properties.
*
* @this {BlobService}
* @param {string}        [container]         Existing container name (required for a container- or blob-scoped SAS).
* @param {string}        [blob]              Existing blob name (required for a blob-scoped SAS).
* @param {object}        [optionsOrCallback] Request options (locationMode, timeoutIntervalInMs,
*                                            clientRequestTimeoutInMs, maximumExecutionTimeInMs,
*                                            clientRequestId, useNagleAlgorithm).
* @param {errorOrResult} callback            `(error, accountProperties, response)`.
*/
BlobService.prototype.getAccountProperties = function (container, blob, optionsOrCallback, callback) {
  var userOptions;
  // Allow the options argument to be omitted: shuffle arguments into place.
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('getAccountProperties', function (v) {
    v.callback(callback);
  });

  // Deep-copy so the caller's options object is never mutated.
  var options = extend(true, {}, userOptions);

  var webResource = WebResource.head(createResourceName(container, blob))
    .withQueryOption(QueryStringConstants.COMP, 'properties')
    .withQueryOption(QueryStringConstants.RESTYPE, 'account');

  // Read-only operation: it may be served by the secondary endpoint.
  options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;

  var processResponseCallback = function (responseObject, next) {
    responseObject.accountPropertiesResult = null;
    if (!responseObject.error) {
      // Account properties are carried in the response headers, not the body.
      responseObject.accountPropertiesResult = AccountPropertiesResult.parse(responseObject.response.headers);
    }

    // function to be called after all filters
    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.accountPropertiesResult, returnObject.response);
    };

    // call the first filter
    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};
+*/ +BlobService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +/** +* Sets the tier of a blockblob under a blob storage account, or the tier of a pageblob under a premium storage account. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} blobTier Please see BlobUtilities.BlobTier.StandardBlobTier or BlobUtilities.BlobTier.PremiumPageBlobTier for possible values. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. 
+*/ +BlobService.prototype.setBlobTier = function (container, blob, blobTier, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setBlobTier', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.string(blobTier, 'blobTier'); + v.containerNameIsValid(container); + v.blobNameIsValid(container, blob); + v.blobTierNameIsValid(blobTier); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'tier') + .withHeader(HeaderConstants.ACCESS_TIER, blobTier); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Lists a segment containing a collection of container items under the specified account. +* +* @this {BlobService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.maxResults] Specifies the maximum number of containers to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the container's metadata be returned as part of the response body. 
(allowed values: '', 'metadata') +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[containers]{@link ContainerResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.listContainersSegmented = function (currentToken, optionsOrCallback, callback) { + this.listContainersSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of container items whose names begin with the specified prefix under the specified account. +* +* @this {BlobService} +* @param {string} prefix The prefix of the container name. 
/**
* Lists a segment of the container items whose names begin with the given
* prefix under the account.
*
* Note: metadata names returned by the server are lower-cased by Node's HTTP
* layer, since metadata travels as (case-insensitive) HTTP headers.
*
* @this {BlobService}
* @param {string}        prefix              The container-name prefix, or null for all containers.
* @param {object}        currentToken        Continuation token from a previous listing call,
*                                            or null/undefined for the first call.
* @param {object}        [optionsOrCallback] Request options (maxResults, include ('' or 'metadata'),
*                                            locationMode, timeouts, clientRequestId, useNagleAlgorithm).
* @param {errorOrResult} callback            `(error, {entries, continuationToken}, response)`;
*                                            `entries` is a list of `[containers]{@link ContainerResult}`,
*                                            `continuationToken` feeds the next listing call.
*/
BlobService.prototype.listContainersSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) {
  var userOptions;
  // Allow the options argument to be omitted.
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('listContainers', function (v) {
    v.callback(callback);
  });

  // Deep-copy so the caller's options object is never mutated.
  var options = extend(true, {}, userOptions);
  var webResource = WebResource.get()
    .withQueryOption(QueryStringConstants.COMP, 'list')
    .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults)
    .withQueryOption(QueryStringConstants.INCLUDE, options.include);

  // Resume from a previous page if a continuation token was supplied.
  if (!azureutil.objectIsNull(currentToken)) {
    webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker);
  }

  webResource.withQueryOption(QueryStringConstants.PREFIX, prefix);

  // Continue listing against the same endpoint the previous page came from.
  options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken);

  var processResponseCallback = function (responseObject, next) {
    responseObject.listContainersResult = null;

    if (!responseObject.error) {
      responseObject.listContainersResult = {
        entries: null,
        continuationToken: null
      };
      responseObject.listContainersResult.entries = [];

      var containers = [];

      if (responseObject.response.body.EnumerationResults.Containers && responseObject.response.body.EnumerationResults.Containers.Container) {
        containers = responseObject.response.body.EnumerationResults.Containers.Container;
        // The XML parser yields a bare object for a single <Container> element;
        // normalize to an array.
        if (!_.isArray(containers)) {
          containers = [containers];
        }
      }

      containers.forEach(function (currentContainer) {
        var containerResult = ContainerResult.parse(currentContainer);
        responseObject.listContainersResult.entries.push(containerResult);
      });

      // A NextMarker in the body means more pages remain; capture it together
      // with the location that served this page.
      if (responseObject.response.body.EnumerationResults.NextMarker) {
        responseObject.listContainersResult.continuationToken = {
          nextMarker: null,
          targetLocation: null
        };

        responseObject.listContainersResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker;
        responseObject.listContainersResult.continuationToken.targetLocation = responseObject.targetLocation;
      }
    }

    // function to be called after all filters
    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.listContainersResult, returnObject.response);
    };

    // call the first filter
    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};

// Container methods
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain +* the container information including `exists` boolean member. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.doesContainerExist = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesContainerExist', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._doesContainerExist(container, false, options, callback); +}; + +/** +* Creates a new container under the specified account. +* If a container with the same name already exists, the operation fails. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
/**
* Creates a new container under the account. The operation fails if a
* container with the same name already exists.
*
* @this {BlobService}
* @param {string}        container           The container name (must not be '$logs').
* @param {object}        [optionsOrCallback] Request options (metadata key/value pairs,
*                                            publicAccessLevel, locationMode, timeouts,
*                                            clientRequestId, useNagleAlgorithm).
* @param {errorOrResult} callback            `(error, containerResult, response)`; the
*                                            `[result]{@link ContainerResult}` carries the
*                                            container information.
*/
BlobService.prototype.createContainer = function (container, optionsOrCallback, callback) {
  var userOptions;
  // Allow the options argument to be omitted.
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('createContainer', function (v) {
    v.string(container, 'container');
    // '$logs' is a reserved system container and may not be created by users.
    v.test(function () { return container !== '$logs'; },
      'Container name format is incorrect');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  // Deep-copy so the caller's options object is never mutated.
  var options = extend(true, {}, userOptions);
  var webResource = WebResource.put(container)
    .withQueryOption(QueryStringConstants.RESTYPE, 'container');

  webResource.addOptionalMetadataHeaders(options.metadata);
  webResource.withHeader(HeaderConstants.BLOB_PUBLIC_ACCESS, options.publicAccessLevel);

  var processResponseCallback = function (responseObject, next) {
    responseObject.containerResult = null;
    if (!responseObject.error) {
      responseObject.containerResult = new ContainerResult(container);
      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);

      // Echo the metadata we sent; the create response does not return it.
      if (options.metadata) {
        responseObject.containerResult.metadata = options.metadata;
      }
    }

    // function to be called after all filters
    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.containerResult, returnObject.response);
    };

    // call the first filter
    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};
+* +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createContainerIfNotExists('taskcontainer', {publicAccessLevel : 'blob'}, function(error) { +* if(!error) { +* // Container created or exists, and is public +* } +* }); +*/ +BlobService.prototype.createContainerIfNotExists = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createContainerIfNotExists', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesContainerExist(container, true, options, function (error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if (error) { + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createContainer(container, options, function (createError, containerResult, createResponse) { + if (!createError) { + containerResult.created = true; + } + else if (createError && createError.statusCode === Constants.HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.BlobErrorCodeStrings.CONTAINER_ALREADY_EXISTS) { + // If it was created before, there was no actual error. + createError = null; + createResponse.isSuccessful = true; + } + + callback(createError, containerResult, createResponse); + }); + } + }); +}; + +/** +* Retrieves a container and its properties from a specified account. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {BlobService} +* @param {string} container The container name. 
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain
+* information for the container.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.getContainerProperties = function (container, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getContainerProperties', function (v) {
+    v.string(container, 'container');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // HEAD ?restype=container: properties and metadata come back as response headers.
+  var webResource = WebResource.head(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId);
+
+  // Read-only operation, so the request may be served from the secondary location.
+  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;
+
+  var self = this;
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.containerResult = null;
+    if (!responseObject.error) {
+      responseObject.containerResult = new ContainerResult(container);
+      responseObject.containerResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
+      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.containerResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Returns all user-defined metadata for the container.
+* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} [options] The request options.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain
+* information for the container.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.getContainerMetadata = function (container, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getContainerMetadata', function (v) {
+    v.string(container, 'container');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // Same HEAD request as getContainerProperties, narrowed with ?comp=metadata.
+  var webResource = WebResource.head(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withQueryOption(QueryStringConstants.COMP, 'metadata')
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId);
+
+  // Read-only operation, so the request may be served from the secondary location.
+  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;
+
+  var self = this;
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.containerResult = null;
+    if (!responseObject.error) {
+      responseObject.containerResult = new ContainerResult(container);
+      responseObject.containerResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
+      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.containerResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Sets the container's metadata.
+*
+* Calling the Set Container Metadata operation overwrites all existing metadata that is associated with the container.
+* It's not possible to modify an individual name/value pair.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} metadata The metadata key/value pairs.
+* @param {object} [options] The request options.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResponse} callback `error` will contain information
+* if an error occurs; otherwise
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.setContainerMetadata = function (container, metadata, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('setContainerMetadata', function (v) {
+    v.string(container, 'container');
+    v.object(metadata, 'metadata');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // PUT ?restype=container&comp=metadata replaces the container's entire metadata
+  // set with the x-ms-meta-* headers added below (see JSDoc note above).
+  var webResource = WebResource.put(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withQueryOption(QueryStringConstants.COMP, 'metadata')
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId);
+
+  webResource.addOptionalMetadataHeaders(metadata);
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.containerResult = null;
+    if (!responseObject.error) {
+      responseObject.containerResult = new ContainerResult(container);
+      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.containerResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Gets the container's ACL.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} [options] The request options.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link ContainerAclResult}` will contain
+* information for the container.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.getContainerAcl = function (container, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getContainerAcl', function (v) {
+    v.string(container, 'container');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var webResource = WebResource.get(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withQueryOption(QueryStringConstants.COMP, 'acl')
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId);
+
+  // Read-only operation, so the request may be served from the secondary location.
+  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.containerResult = null;
+    if (!responseObject.error) {
+      responseObject.containerResult = new ContainerResult(container);
+      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);
+      // The signed identifiers come back in the XML response body, not in headers.
+      responseObject.containerResult.signedIdentifiers = AclResult.parse(responseObject.response.body);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.containerResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Updates the container's ACL.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {Object.<string, AccessPolicy>} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information.
+* @param {object} [options] The request options.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
+* @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link ContainerAclResult}` will contain
+* information for the container.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.setContainerAcl = function (container, signedIdentifiers, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('setContainerAcl', function (v) {
+    v.string(container, 'container');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+
+  var policies = null;
+  if (signedIdentifiers) {
+    // signedIdentifiers must be an object keyed by identifier, not an array.
+    if (_.isArray(signedIdentifiers)) {
+      throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS);
+    }
+    policies = AclResult.serialize(signedIdentifiers);
+  }
+
+  var webResource = WebResource.put(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withQueryOption(QueryStringConstants.COMP, 'acl')
+    // Content-Length is byte length of the serialized XML policy body (0 when none).
+    .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0)
+    .withHeader(HeaderConstants.BLOB_PUBLIC_ACCESS, options.publicAccessLevel)
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId)
+    .withBody(policies);
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.containerResult = null;
+    if (!responseObject.error) {
+      responseObject.containerResult = new ContainerResult(container, options.publicAccessLevel);
+      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);
+      // Echo the caller-supplied identifiers back; the PUT response has no body.
+      if (signedIdentifiers) {
+        responseObject.containerResult.signedIdentifiers = signedIdentifiers;
+      }
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.containerResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, webResource.body, options, processResponseCallback);
+};
+
+/**
+* Marks the specified container for deletion.
+* The container and any blobs contained within it are later deleted during garbage collection.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} [options] The request options.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResponse} callback `error` will contain information
+* if an error occurs; otherwise
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.deleteContainer = function (container, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('deleteContainer', function (v) {
+    v.string(container, 'container');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // DELETE ?restype=container only marks the container; actual removal happens
+  // later during server-side garbage collection (see JSDoc above).
+  var webResource = WebResource.del(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId);
+
+  var processResponseCallback = function (responseObject, next) {
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Marks the specified container for deletion if it exists.
+* The container and any blobs contained within it are later deleted during garbage collection.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} [options] The request options.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the container exists and was deleted, or false if the container +* did not exist. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.deleteContainerIfExists = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteContainerIfExists', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesContainerExist(container, true, options, function (error, result, response) { + if (error) { + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteContainer(container, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statuscode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.BlobErrorCodeStrings.CONTAINER_NOT_FOUND) { + // If it was deleted already, there was no actual error. 
+ deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Lists a segment containing a collection of blob directory items in the container. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {int} [options.maxResults] Specifies the maximum number of directories to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`.
+* `entries` gives a list of `[directories]{@link DirectoryResult}` and the `continuationToken` is used for the next listing operation.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.listBlobDirectoriesSegmented = function (container, currentToken, optionsOrCallback, callback) {
+  this.listBlobDirectoriesSegmentedWithPrefix(container, null /* prefix */, currentToken, optionsOrCallback, callback);
+};
+
+/**
+* Lists a segment containing a collection of blob directory items in the container.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {string} prefix The prefix of the blob directory.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+* @param {object} [options] The request options.
+* @param {int} [options.maxResults] Specifies the maximum number of directories to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000)
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`.
+* `entries` gives a list of `[directories]{@link BlobResult}` and the `continuationToken` is used for the next listing operation.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.listBlobDirectoriesSegmentedWithPrefix = function (container, prefix, currentToken, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+  // NOTE(review): this writes the delimiter onto the caller-supplied options
+  // object (no defensive copy), so the caller's object is mutated — confirm
+  // this is acceptable before reusing one options object across calls.
+  userOptions.delimiter = '/';
+
+  // "Dircotries" is a known misspelling of the private helper's declared name
+  // elsewhere in this file — do not "correct" it here without renaming the helper.
+  this._listBlobsOrDircotriesSegmentedWithPrefix(container, prefix, currentToken, BlobConstants.ListBlobTypes.Directory, userOptions, callback);
+};
+
+/**
+* Lists a segment containing a collection of blob items in the container.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+* @param {object} [options] The request options.
+* @param {string} [options.delimiter] Delimiter, i.e. '/', for specifying folder hierarchy.
+* @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. (maximum: 5000)
+* @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted').
+* Please find these values in BlobUtilities.BlobListingDetails. Multiple values can be added separated with a comma (,).
+* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`.
+* `entries` gives a list of `[blobs]{@link BlobResult}` and the `continuationToken` is used for the next listing operation.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.listBlobsSegmented = function (container, currentToken, optionsOrCallback, callback) {
+  this.listBlobsSegmentedWithPrefix(container, null /* prefix */, currentToken, optionsOrCallback, callback);
+};
+
+/**
+* Lists a segment containing a collection of blob items whose names begin with the specified prefix in the container.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {string} prefix The prefix of the blob name.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+* @param {object} [options] The request options.
+* @param {string} [options.delimiter] Delimiter, i.e. '/', for specifying folder hierarchy.
+* @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. (maximum: 5000)
+* @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted').
+* Please find these values in BlobUtilities.BlobListingDetails. Multiple values can be added separated with a comma (,).
+* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain
+* the entries of `[blobs]{@link BlobResult}` and the continuation token for the next listing operation.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.listBlobsSegmentedWithPrefix = function (container, prefix, currentToken, optionsOrCallback, callback) {
+  // Delegates to the shared blob/directory listing helper ("Dircotries" is the
+  // helper's declared, misspelled name elsewhere in this file — keep as-is).
+  this._listBlobsOrDircotriesSegmentedWithPrefix(container, prefix, currentToken, BlobConstants.ListBlobTypes.Blob, optionsOrCallback, callback);
+};
+
+// Lease methods
+
+/**
+* Acquires a new lease. If container and blob are specified, acquires a blob lease. Otherwise, if only container is specified and blob is null, acquires a container lease.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {string} blob The blob name.
+* @param {object} [options] The request options.
+* @param {string} [options.leaseDuration] The lease duration in seconds. A non-infinite lease can be between 15 and 60 seconds. Default is never to expire.
+* @param {string} [options.proposedLeaseId] The proposed lease identifier. Must be a GUID.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
// Shared argument validation for the lease operations below: every lease call
// requires a valid container name and a callback. (Blob name is optional —
// a null blob targets the container lease instead of a blob lease.)
function validateLeaseArgs(operationName, container, callback) {
  validate.validateArgs(operationName, function (v) {
    v.string(container, 'container');
    v.containerNameIsValid(container);
    v.callback(callback);
  });
}

/**
* Acquires a new lease. If container and blob are specified, acquires a blob lease.
* Otherwise, if only container is specified and blob is null, acquires a container lease.
*
* @this {BlobService}
* @param {string}         container                   The container name.
* @param {string}         blob                        The blob name (null for a container lease).
* @param {object}         [options]                   The request options.
* @param {string}         [options.leaseDuration]     Lease duration in seconds; a non-infinite lease is
*                                                     between 15 and 60 seconds. Default is never to expire.
* @param {string}         [options.proposedLeaseId]   The proposed lease identifier. Must be a GUID.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode}   [options.locationMode]      Location mode; see StorageUtilities.LocationMode.
* @param {int}            [options.timeoutIntervalInMs]      Server timeout, in milliseconds.
* @param {int}            [options.clientRequestTimeoutInMs] Client request timeout, in milliseconds.
* @param {int}            [options.maximumExecutionTimeInMs] Maximum execution time across retries, in milliseconds.
* @param {string}         [options.clientRequestId]   Client request ID (1KB character limit).
* @param {bool}           [options.useNagleAlgorithm] Whether to use the Nagle algorithm; defaults to false.
* @param {errorOrResult}  callback                    `error` will contain information if an error occurs;
*                                                     otherwise `[result]{@link LeaseResult}` will contain the lease
*                                                     information. `response` will contain information related to
*                                                     this operation.
*/
BlobService.prototype.acquireLease = function (container, blob, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validateLeaseArgs('acquireLease', container, callback);

  var options = extend(true, {}, suppliedOptions);

  // -1 requests an infinite lease when the caller gave no explicit duration.
  if (!options.leaseDuration) {
    options.leaseDuration = -1;
  }

  this._leaseImpl(container, blob, null /* leaseId */, BlobConstants.LeaseOperation.ACQUIRE, options, callback);
};

/**
* Renews an existing lease. If container and blob are specified, renews the blob lease.
* Otherwise, if only container is specified and blob is null, renews the container lease.
*
* @this {BlobService}
* @param {string}         container   The container name.
* @param {string}         blob        The blob name (null for a container lease).
* @param {string}         leaseId     The lease identifier. Must be a GUID.
* @param {object}         [options]   The request options (accessConditions, locationMode, timeouts,
*                                     clientRequestId, useNagleAlgorithm — see acquireLease).
* @param {errorOrResult}  callback    `error` will contain information if an error occurs; otherwise
*                                     `[result]{@link LeaseResult}` will contain the lease information.
*                                     `response` will contain information related to this operation.
*/
BlobService.prototype.renewLease = function (container, blob, leaseId, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validateLeaseArgs('renewLease', container, callback);

  this._leaseImpl(container, blob, leaseId, BlobConstants.LeaseOperation.RENEW, extend(true, {}, suppliedOptions), callback);
};

/**
* Changes the lease ID of an active lease. If container and blob are specified, changes the blob lease.
* Otherwise, if only container is specified and blob is null, changes the container lease.
*
* @this {BlobService}
* @param {string}         container        The container name.
* @param {string}         blob             The blob name (null for a container lease).
* @param {string}         leaseId          The current lease identifier.
* @param {string}         proposedLeaseId  The proposed lease identifier. Must be a GUID.
* @param {object}         [options]        The request options (accessConditions, locationMode, timeouts,
*                                          clientRequestId, useNagleAlgorithm — see acquireLease).
* @param {errorOrResult}  callback         `error` will contain information if an error occurs; otherwise
*                                          `[result]{@link LeaseResult}` will contain the lease information.
*                                          `response` will contain information related to this operation.
*/
BlobService.prototype.changeLease = function (container, blob, leaseId, proposedLeaseId, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validateLeaseArgs('changeLease', container, callback);

  var options = extend(true, {}, suppliedOptions);
  options.proposedLeaseId = proposedLeaseId;

  this._leaseImpl(container, blob, leaseId, BlobConstants.LeaseOperation.CHANGE, options, callback);
};

/**
* Releases the lease. If container and blob are specified, releases the blob lease.
* Otherwise, if only container is specified and blob is null, releases the container lease.
*
* @this {BlobService}
* @param {string}         container   The container name.
* @param {string}         blob        The blob name (null for a container lease).
* @param {string}         leaseId     The lease identifier.
* @param {object}         [options]   The request options (accessConditions, locationMode, timeouts,
*                                     clientRequestId, useNagleAlgorithm — see acquireLease).
* @param {errorOrResult}  callback    `error` will contain information if an error occurs; otherwise
*                                     `[result]{@link LeaseResult}` will contain the lease information.
*                                     `response` will contain information related to this operation.
*/
BlobService.prototype.releaseLease = function (container, blob, leaseId, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validateLeaseArgs('releaseLease', container, callback);

  this._leaseImpl(container, blob, leaseId, BlobConstants.LeaseOperation.RELEASE, extend(true, {}, suppliedOptions), callback);
};

/**
* Breaks the lease but ensures that another client cannot acquire a new lease until the current
* lease period has expired. If container and blob are specified, breaks the blob lease. Otherwise,
* if only container is specified and blob is null, breaks the container lease.
*
* @this {BlobService}
* @param {string}         container                  The container name.
* @param {string}         blob                       The blob name (null for a container lease).
* @param {object}         [options]                  The request options.
* @param {int}            [options.leaseBreakPeriod] Lease break period, between 0 and 60 seconds. If unspecified,
*                                                    a fixed-duration lease breaks after the remaining lease period
*                                                    elapses, and an infinite lease breaks immediately.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {errorOrResult}  callback                   `error` will contain information if an error occurs; otherwise
*                                                    `[result]{@link LeaseResult}` will contain the lease information.
*                                                    `response` will contain information related to this operation.
*/
BlobService.prototype.breakLease = function (container, blob, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validateLeaseArgs('breakLease', container, callback);

  this._leaseImpl(container, blob, null /* leaseId */, BlobConstants.LeaseOperation.BREAK, extend(true, {}, suppliedOptions), callback);
};

// Blob methods
/**
* Returns all user-defined metadata, standard HTTP properties, and system properties for the blob.
* It does not return or modify the content of the blob.
* Note that all metadata names returned from the server are converted to lower case by Node.js,
* as metadata travels in case-insensitive HTTP headers.
*
* @this {BlobService}
* @param {string}         container                  The container name.
* @param {string}         blob                       The blob name.
* @param {object}         [options]                  The request options.
* @param {string}         [options.snapshotId]       The snapshot identifier.
* @param {string}         [options.leaseId]          The lease identifier.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode}   [options.locationMode]     Location mode; see StorageUtilities.LocationMode.
* @param {int}            [options.timeoutIntervalInMs]      Server timeout, in milliseconds.
* @param {int}            [options.clientRequestTimeoutInMs] Client request timeout, in milliseconds.
* @param {int}            [options.maximumExecutionTimeInMs] Maximum execution time across retries, in milliseconds.
* @param {string}         [options.clientRequestId]  Client request ID (1KB character limit).
* @param {bool}           [options.useNagleAlgorithm] Whether to use the Nagle algorithm; defaults to false.
* @param {errorOrResult}  callback                   `error` will contain information if an error occurs; otherwise
*                                                    `[result]{@link BlobResult}` will contain information about the
*                                                    blob. `response` will contain information related to this operation.
*/
BlobService.prototype.getBlobProperties = function (container, blob, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validate.validateArgs('getBlobProperties', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);
  var webResource = WebResource.head(createResourceName(container, blob));

  if (options.snapshotId) {
    webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId);
  }

  BlobResult.setHeadersFromBlob(webResource, options);

  // Properties are read-only here, so the secondary location may serve the request.
  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;

  var self = this;
  this.performRequest(webResource, null, options, function (responseObject, next) {
    responseObject.blobResult = null;
    if (!responseObject.error) {
      var blobResult = new BlobResult(container, blob);
      blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
      blobResult.getPropertiesFromHeaders(responseObject.response.headers);
      responseObject.blobResult = blobResult;
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.blobResult, returnObject.response);
    });
  });
};

/**
* Returns all user-defined metadata for the specified blob or snapshot.
* It does not modify or return the content of the blob.
* Note that all metadata names returned from the server are converted to lower case by Node.js,
* as metadata travels in case-insensitive HTTP headers.
*
* @this {BlobService}
* @param {string}         container                  The container name.
* @param {string}         blob                       The blob name.
* @param {object}         [options]                  The request options.
* @param {string}         [options.snapshotId]       The snapshot identifier.
* @param {string}         [options.leaseId]          The lease identifier.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode}   [options.locationMode]     Location mode; see StorageUtilities.LocationMode.
* @param {int}            [options.timeoutIntervalInMs]      Server timeout, in milliseconds.
* @param {int}            [options.clientRequestTimeoutInMs] Client request timeout, in milliseconds.
* @param {int}            [options.maximumExecutionTimeInMs] Maximum execution time across retries, in milliseconds.
* @param {string}         [options.clientRequestId]  Client request ID (1KB character limit).
* @param {bool}           [options.useNagleAlgorithm] Whether to use the Nagle algorithm; defaults to false.
* @param {errorOrResult}  callback                   `error` will contain information if an error occurs; otherwise
*                                                    `[result]{@link BlobResult}` will contain information about the
*                                                    blob. `response` will contain information related to this operation.
*/
BlobService.prototype.getBlobMetadata = function (container, blob, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validate.validateArgs('getBlobMetadata', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);
  var webResource = WebResource.head(createResourceName(container, blob));

  webResource.withQueryOption(QueryStringConstants.COMP, 'metadata');
  webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId);

  BlobResult.setHeadersFromBlob(webResource, options);

  // Metadata reads may also be served from the secondary location.
  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;

  var self = this;
  this.performRequest(webResource, null, options, function (responseObject, next) {
    responseObject.blobResult = null;
    if (!responseObject.error) {
      var blobResult = new BlobResult(container, blob);
      blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
      blobResult.getPropertiesFromHeaders(responseObject.response.headers);
      responseObject.blobResult = blobResult;
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.blobResult, returnObject.response);
    });
  });
};
/**
* Sets user-defined properties for the specified blob or snapshot.
* It does not modify or return the content of the blob.
*
* @this {BlobService}
* @param {string}         container                       The container name.
* @param {string}         blob                            The blob name.
* @param {object}         [properties]                    The blob properties to set.
* @param {string}         [properties.contentType]        MIME content type; default is application/octet-stream.
* @param {string}         [properties.contentEncoding]    Content encodings applied to the blob.
* @param {string}         [properties.contentLanguage]    Natural languages used by this resource.
* @param {string}         [properties.cacheControl]       The blob's cache control.
* @param {string}         [properties.contentDisposition] The blob's content disposition.
* @param {string}         [properties.contentMD5]         The blob's MD5 hash.
* @param {object}         [options]                       The request options.
* @param {string}         [options.leaseId]               The lease identifier.
* @param {AccessConditions} [options.accessConditions]    The access conditions.
* @param {LocationMode}   [options.locationMode]          Location mode; see StorageUtilities.LocationMode.
* @param {int}            [options.timeoutIntervalInMs]      Server timeout, in milliseconds.
* @param {int}            [options.clientRequestTimeoutInMs] Client request timeout, in milliseconds.
* @param {int}            [options.maximumExecutionTimeInMs] Maximum execution time across retries, in milliseconds.
* @param {string}         [options.clientRequestId]       Client request ID (1KB character limit).
* @param {bool}           [options.useNagleAlgorithm]     Whether to use the Nagle algorithm; defaults to false.
* @param {errorOrResult}  callback                        `error` will contain information if an error occurs;
*                                                         otherwise `[result]{@link BlobResult}` will contain
*                                                         information about the blob. `response` will contain
*                                                         information related to this operation.
*/
BlobService.prototype.setBlobProperties = function (container, blob, properties, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validate.validateArgs('setBlobProperties', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  // The caller-supplied properties seed contentSettings; explicit request
  // options layered on top take precedence.
  var options = extend(true, { contentSettings: properties }, suppliedOptions);

  var webResource = WebResource.put(createResourceName(container, blob))
    .withQueryOption(QueryStringConstants.COMP, 'properties');

  BlobResult.setPropertiesFromBlob(webResource, options);

  this._setBlobPropertiesHelper({
    webResource: webResource,
    options: options,
    container: container,
    blob: blob,
    callback: callback
  });
};

/**
* Sets user-defined metadata for the specified blob or snapshot as one or more name-value pairs.
* It does not modify or return the content of the blob.
*
* @this {BlobService}
* @param {string}         container                  The container name.
* @param {string}         blob                       The blob name.
* @param {object}         metadata                   The metadata key/value pairs.
* @param {object}         [options]                  The request options.
* @param {string}         [options.snapshotId]       The snapshot identifier.
* @param {string}         [options.leaseId]          The lease identifier.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode}   [options.locationMode]     Location mode; see StorageUtilities.LocationMode.
* @param {int}            [options.timeoutIntervalInMs]      Server timeout, in milliseconds.
* @param {int}            [options.clientRequestTimeoutInMs] Client request timeout, in milliseconds.
* @param {int}            [options.maximumExecutionTimeInMs] Maximum execution time across retries, in milliseconds.
* @param {string}         [options.clientRequestId]  Client request ID (1KB character limit).
* @param {bool}           [options.useNagleAlgorithm] Whether to use the Nagle algorithm; defaults to false.
* @param {errorOrResult}  callback                   `error` will contain information if an error occurs; otherwise
*                                                    `[result]{@link BlobResult}` will contain information on the
*                                                    blob. `response` will contain information related to this operation.
*/
BlobService.prototype.setBlobMetadata = function (container, blob, metadata, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  validate.validateArgs('setBlobMetadata', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.object(metadata, 'metadata');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);
  var webResource = WebResource.put(createResourceName(container, blob))
    .withQueryOption(QueryStringConstants.COMP, 'metadata');

  webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId);

  options.metadata = metadata;
  BlobResult.setHeadersFromBlob(webResource, options);

  this.performRequest(webResource, null, options, function (responseObject, next) {
    responseObject.blobResult = null;
    if (!responseObject.error) {
      responseObject.blobResult = new BlobResult(container, blob);
      responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers);
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.blobResult, returnObject.response);
    });
  });
};
/**
* Provides a stream to read from a blob.
*
* @this {BlobService}
* @param {string}         container                  The container name.
* @param {string}         blob                       The blob name.
* @param {object}         [options]                  The request options (snapshotId, leaseId, rangeStart,
*                                                    rangeEnd, accessConditions, useTransactionalMD5,
*                                                    disableContentMD5Validation, locationMode, timeouts,
*                                                    clientRequestId, useNagleAlgorithm — see getBlobToStream).
* @param {errorOrResult}  callback                   `error` will contain information if an error occurs; otherwise
*                                                    `[result]{@link BlobResult}` will contain the blob information.
*                                                    `response` will contain information related to this operation.
* @return {Readable}                                 A Node.js Readable stream.
* @example
* var azure = require('azure-storage');
* var blobService = azure.createBlobService();
* var writable = fs.createWriteStream(destinationFileNameTarget);
* blobService.createReadStream(containerName, blobName).pipe(writable);
*/
BlobService.prototype.createReadStream = function (container, blob, optionsOrCallback, callback) {
  var options;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { options = opts; callback = cb; });

  validate.validateArgs('createReadStream', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
  });

  // The download is piped into a ChunkStream, which is handed back to the
  // caller immediately; download errors surface as 'error' events on it.
  var stream = new ChunkStream();

  this.getBlobToStream(container, blob, stream, options, function (error, responseBlob, response) {
    if (error) {
      stream.emit('error', error);
    }

    if (callback) {
      callback(error, responseBlob, response);
    }
  });

  return stream;
};
/**
* Downloads a blob into a stream.
*
* @this {BlobService}
* @param {string}         container                             The container name.
* @param {string}         blob                                  The blob name.
* @param {Writable}       writeStream                           The Node.js Writable stream.
* @param {object}         [options]                             The request options.
* @param {boolean}        [options.skipSizeCheck]               Skip the size check to perform a direct download.
*                                                               Set to true for small blobs. Parallel download and
*                                                               speed summary won't work with this option on.
* @param {SpeedSummary}   [options.speedSummary]                The download tracker object.
* @param {int}            [options.parallelOperationThreadCount] Number of parallel operations for the transfer.
* @param {string}         [options.snapshotId]                  The snapshot identifier.
* @param {string}         [options.leaseId]                     The lease identifier.
* @param {string}         [options.rangeStart]                  Return only the bytes in the specified range.
* @param {string}         [options.rangeEnd]                    Return only the bytes in the specified range.
* @param {boolean}        [options.useTransactionalMD5]         Calculate and send/validate content MD5 for transactions.
* @param {boolean}        [options.disableContentMD5Validation] Disable MD5 validation when downloading.
* @param {AccessConditions} [options.accessConditions]          The access conditions.
* @param {LocationMode}   [options.locationMode]                Location mode; see StorageUtilities.LocationMode.
* @param {int}            [options.timeoutIntervalInMs]         Server timeout, in milliseconds.
* @param {int}            [options.clientRequestTimeoutInMs]    Client request timeout, in milliseconds.
* @param {int}            [options.maximumExecutionTimeInMs]    Maximum execution time across retries, in milliseconds.
* @param {string}         [options.clientRequestId]             Client request ID (1KB character limit).
* @param {bool}           [options.useNagleAlgorithm]           Whether to use the Nagle algorithm; defaults to false.
* @param {errorOrResult}  callback                              `error` will contain information if an error occurs;
*                                                               otherwise `[result]{@link BlobResult}` will contain
*                                                               the blob information. `response` will contain
*                                                               information related to this operation.
* @return {SpeedSummary}
*
* @example
* var azure = require('azure-storage');
* var blobService = azure.createBlobService();
* blobService.getBlobToStream('taskcontainer', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverBlob) {
*   if(!error) {
*     // Blob available in serverBlob.blob variable
*   }
* });
*/
BlobService.prototype.getBlobToStream = function (container, blob, writeStream, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (opts, cb) { suppliedOptions = opts; callback = cb; });

  // Deliberately mutates the caller's options object so the caller can observe
  // progress on the same SpeedSummary instance this function returns.
  suppliedOptions.speedSummary = suppliedOptions.speedSummary || new SpeedSummary(blob);

  validate.validateArgs('getBlobToStream', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.object(writeStream, 'writeStream');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);

  var propertiesRequestOptions = {
    timeoutIntervalInMs: options.timeoutIntervalInMs,
    clientRequestTimeoutInMs: options.clientRequestTimeoutInMs,
    snapshotId: options.snapshotId,
    accessConditions: options.accessConditions
  };

  if (options.skipSizeCheck) {
    // Caller vouches the blob is small: one direct download, no size probe.
    this._getBlobToStream(container, blob, writeStream, options, callback);
  } else {
    var self = this;
    // Probe the blob size first so large blobs can be fetched as parallel ranges.
    this.getBlobProperties(container, blob, propertiesRequestOptions, function (error, properties) {
      if (error) {
        callback(error);
        return;
      }

      // NOTE(review): a truthiness test means rangeStart === 0 is treated as
      // "no range" here, so rangeEnd is ignored for that case — confirm intended.
      var downloadSize;
      if (options.rangeStart) {
        var maxOffset = properties.contentLength - 1;
        var lastByte = options.rangeEnd ? Math.min(options.rangeEnd, maxOffset) : maxOffset;
        downloadSize = lastByte - options.rangeStart + 1;
      } else {
        downloadSize = properties.contentLength;
      }
      options.speedSummary.totalSize = downloadSize;

      if (downloadSize > self.singleBlobPutThresholdInBytes) {
        // Large download: carry the whole-blob MD5 along so the range-stream
        // path can still validate content, then fetch in parallel ranges.
        azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(properties, ['contentSettings', 'contentMD5'], null));
        self._getBlobToRangeStream(container, blob, properties.blobType, writeStream, options, callback);
      } else {
        self._getBlobToStream(container, blob, writeStream, options, callback);
      }
    });
  }

  return options.speedSummary;
};
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {BlobService~blobToText} callback `error` will contain information +* if an error occurs; otherwise `text` will contain the blob contents, +* and `[blockBlob]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.getBlobToText = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getBlobToText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName) + .withRawResponse(); + + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + BlobResult.setHeadersFromBlob(webResource, options); + this._setRangeContentMD5Header(webResource, options); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.text = null; + responseObject.blobResult = null; + + if 
(!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + responseObject.text = responseObject.response.body; + + self._validateLengthAndMD5(options, responseObject); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.text, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +* If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, +* or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error +* and nothing will be deleted. +* If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.BlobUtilities.SnapshotDeleteOptions.*. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; `response` will contain information related to this operation. 
+*/ +BlobService.prototype.deleteBlob = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.del(resourceName) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (!azureutil.objectIsNull(options.snapshotId) && !azureutil.objectIsNull(options.deleteSnapshots)) { + throw new ArgumentError('options', SR.INVALID_DELETE_SNAPSHOT_OPTION); + } + + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + webResource.withHeader(HeaderConstants.DELETE_SNAPSHOT, options.deleteSnapshots); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot. +* Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; `response` will contain information related to this operation. 
+*/ +BlobService.prototype.undeleteBlob = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'undelete'); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a blob exists on the service. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information including the `exists` boolean member. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.doesBlobExist = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesBlobExist', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._doesBlobExist(container, blob, false, options, callback); +}; + +/** +* Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted during garbage collection. +* If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, +* or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error +* and nothing will be deleted. +* If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. 
+* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.BlobUtilities.SnapshotDeleteOptions.*. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the blob was deleted, or false if the blob +* does not exist. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.deleteBlobIfExists = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteBlobIfExists', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesBlobExist(container, blob, true, options, function (error, existsResult, response) { + if (error) { + callback(error, existsResult.exists, response); + } else if (!existsResult.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteBlob(container, blob, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.BlobErrorCodeStrings.BLOB_NOT_FOUND) { + // If it was deleted already, there was no actual error. + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Creates a read-only snapshot of a blob. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the ID of the snapshot. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.createBlobSnapshot = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlobSnapshot', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'snapshot'); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.snapshotId = null; + if (!responseObject.error) { + responseObject.snapshotId = responseObject.response.headers[HeaderConstants.SNAPSHOT]; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.snapshotId, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Starts to copy a blob or an Azure Storage file to a destination blob. +* +* For an asynchronous copy(by default), this operation returns a object including a copy ID which +* you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis. +* The source blob for an asynchronous copy operation may be a block blob, an append blob, +* a page blob or an Azure Storage file. +* +* Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob for more details. +* +* @this {BlobService} +* @param {string} sourceUri The source blob URI. +* @param {string} targetContainer The target container name. +* @param {string} targetBlob The target blob name. +* @param {object} [options] The request options. 
+* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {boolean} [options.isIncrementalCopy] If it's incremental copy or not. Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob +* @param {string} [options.snapshotId] The source blob snapshot identifier. +* @param {object} [options.metadata] The target blob metadata key/value pairs. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.sourceLeaseId] The source blob lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {AccessConditions} [options.sourceAccessConditions] The source access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.startCopyBlob = function (sourceUri, targetContainer, targetBlob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('startCopyBlob', function (v) { + v.string(sourceUri, 'sourceUri'); + v.string(targetContainer, 'targetContainer'); + v.string(targetBlob, 'targetBlob'); + v.containerNameIsValid(targetContainer); + v.callback(callback); + }); + + var targetResourceName = createResourceName(targetContainer, targetBlob); + + var options = extend(true, {}, userOptions); + + if (options.snapshotId) { + var uri = url.parse(sourceUri, true); + if (uri.query['snapshot']) { + throw new ArgumentError('options.snapshotId', 'Duplicate snapshot supplied in both the source uri and option.'); + } + + uri.search = undefined; + uri.query['snapshot'] = options.snapshotId; + + sourceUri = url.format(uri); + } + + var webResource = WebResource.put(targetResourceName) + .withHeader(HeaderConstants.COPY_SOURCE, sourceUri); + + if (options.isIncrementalCopy) { + webResource.withQueryOption(QueryStringConstants.COMP, 'incrementalcopy'); + } + + webResource.withHeader(HeaderConstants.ACCESS_TIER, options.blobTier); + webResource.withHeader(HeaderConstants.LEASE_ID, options.leaseId); + webResource.withHeader(HeaderConstants.SOURCE_LEASE_ID, options.sourceLeaseId); + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(targetContainer, targetBlob); + 
responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + + if (options.metadata) { + responseObject.blobResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Abort a blob copy operation. +* +* @this {BlobService} +* @param {string} container The destination container name. +* @param {string} blob The destination blob name. +* @param {string} copyId The copy operation identifier. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +BlobService.prototype.abortCopyBlob = function (container, blob, copyId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('abortCopyBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var resourceName = createResourceName(container, blob); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COPY_ID, copyId) + .withQueryOption(QueryStringConstants.COMP, 'copy') + .withHeader(HeaderConstants.COPY_ACTION, 'abort'); + + webResource.withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Retrieves a shared access signature token. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} [blob] The blob name. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used). 
+* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive.
+* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS.
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http.
+* @param {object} [headers] The optional header values to set for a blob returned with this SAS.
+* @param {string} [headers.cacheControl] The optional value of the Cache-Control response header to be returned when this SAS is used.
+* @param {string} [headers.contentType] The optional value of the Content-Type response header to be returned when this SAS is used.
+* @param {string} [headers.contentEncoding] The optional value of the Content-Encoding response header to be returned when this SAS is used.
+* @param {string} [headers.contentLanguage] The optional value of the Content-Language response header to be returned when this SAS is used.
+* @param {string} [headers.contentDisposition] The optional value of the Content-Disposition response header to be returned when this SAS is used.
+* @return {string} The shared access signature query string. Note this string does not contain the leading "?".
+*/
+BlobService.prototype.generateSharedAccessSignature = function (container, blob, sharedAccessPolicy, headers) {
+  // check if the BlobService is able to generate a shared access signature
+  if (!this.storageCredentials) {
+    throw new ArgumentNullError('storageCredentials');
+  }
+
+  if (!this.storageCredentials.generateSignedQueryString) {
+    throw new ArgumentError('storageCredentials', SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY);
+  }
+
+  // Validate container name. Blob name is optional.
+ validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.object(sharedAccessPolicy, 'sharedAccessPolicy'); + }); + + var resourceType = BlobConstants.ResourceTypes.CONTAINER; + if (blob) { + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(blob, 'blob'); + }); + resourceType = BlobConstants.ResourceTypes.BLOB; + } + + if (sharedAccessPolicy.AccessPolicy) { + if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Start)) { + if (!_.isDate(sharedAccessPolicy.AccessPolicy.Start)) { + sharedAccessPolicy.AccessPolicy.Start = new Date(sharedAccessPolicy.AccessPolicy.Start); + } + + sharedAccessPolicy.AccessPolicy.Start = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Start); + } + + if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Expiry)) { + if (!_.isDate(sharedAccessPolicy.AccessPolicy.Expiry)) { + sharedAccessPolicy.AccessPolicy.Expiry = new Date(sharedAccessPolicy.AccessPolicy.Expiry); + } + + sharedAccessPolicy.AccessPolicy.Expiry = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Expiry); + } + } + + var resourceName = createResourceName(container, blob, true); + return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.Blob, resourceName, sharedAccessPolicy, null, { headers: headers, resourceType: resourceType }); +}; + +/** +* Retrieves a blob or container URL. +* +* @param {string} container The container name. +* @param {string} [blob] The blob name. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. +* @param {string} [snapshotId] The snapshot identifier. +* @return {string} The formatted URL string. 
+* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var sharedAccessPolicy = { +* AccessPolicy: { +* Permissions: azure.BlobUtilities.SharedAccessPermissions.READ, +* Start: startDate, +* Expiry: expiryDate +* }, +* }; +* +* var sasToken = blobService.generateSharedAccessSignature(containerName, blobName, sharedAccessPolicy); +* var sasUrl = blobService.getUrl(containerName, blobName, sasToken); +*/ +BlobService.prototype.getUrl = function (container, blob, sasToken, primary, snapshotId) { + validate.validateArgs('getUrl', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + }); + + var host; + if (!azureutil.objectIsNull(primary) && primary === false) { + host = this.host.secondaryHost; + } else { + host = this.host.primaryHost; + } + + host = azureutil.trimPortFromUri(host); + if (host && host.lastIndexOf('/') !== (host.length - 1)) { + host = host + '/'; + } + + var query = qs.parse(sasToken); + if (snapshotId) { + query[QueryStringConstants.SNAPSHOT] = snapshotId; + } + + var fullPath = url.format({ pathname: this._getPath(createResourceName(container, blob)), query: query }); + return url.resolve(host, fullPath); +}; + +// Page blob methods + +/** +* Creates a page blob of the specified length. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {int} length The length of the page blob in bytes. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. 
Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the blob content. +* @param {string} [options.sequenceNumber] The blob's sequence number. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createPageBlob = function (container, blob, length, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createPageBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.value(length, 'length'); + v.callback(callback); + }); + + if (length && length % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_BLOB_LENGTH); + } + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.BLOB_TYPE, BlobConstants.BlobTypes.PAGE) + .withHeader(HeaderConstants.BLOB_CONTENT_LENGTH, length) + .withHeader(HeaderConstants.CONTENT_LENGTH, 0) + .withHeader(HeaderConstants.ACCESS_TIER, options.blobTier) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Uploads a page blob from a stream. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. 
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {string} blob The blob name.
+* @param {Stream} stream Stream to the data to store.
+* @param {int} streamLength The length of the stream to upload.
+* @param {object} [options] The request options.
+* @param {SpeedSummary} [options.speedSummary] The upload tracker objects.
+* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading.
+* @param {string} [options.leaseId] The lease identifier.
+* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport.
+* @param {object} [options.metadata] The metadata key/value pairs.
+* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads.
+* The default value is false for page blobs.
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier.
+* @param {object} [options.contentSettings] The content settings of the blob.
+* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
+* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
+* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createPageBlobFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) { + return this._createBlobFromStream(container, blob, BlobConstants.BlobTypes.PAGE, stream, streamLength, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a page blob. Assumes that the blob exists. +* If it does not, please create the blob using createPageBlob before calling this method or use createWriteStreamNewPageBlob. 
+* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createPageBlob(containerName, blobName, 1024, function (err) { +* // Pipe file to a blob +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToExistingPageBlob(containerName, blobName)); +* }); +*/ +BlobService.prototype.createWriteStreamToExistingPageBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.PAGE, 0, false, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a page blob. Creates the blob before writing data. If the blob already exists on the service, it will be overwritten. 
+* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} length The blob length. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. 
+* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createPageBlob(containerName, blobName, 1024, function (err) { +* // Pipe file to a blob +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToNewPageBlob(containerName, blobName)); +* }); +*/ +BlobService.prototype.createWriteStreamToNewPageBlob = function (container, blob, length, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.PAGE, length, true, optionsOrCallback, callback); +}; + +/** +* Updates a page blob from a stream. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.transactionalContentMD5] An optional hash value used to ensure transactional integrity for the page. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the page information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createPagesFromStream = function (container, blob, readStream, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createPagesFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if ((rangeEnd - rangeStart) + 1 > BlobConstants.MAX_UPDATE_PAGE_SIZE) { + throw new RangeError(SR.INVALID_PAGE_RANGE_FOR_UPDATE); + } + + var self = this; + if (azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + azureutil.calculateMD5(readStream, BlobConstants.MAX_UPDATE_PAGE_SIZE, options, function (internalBuff, contentMD5) { + options.transactionalContentMD5 = contentMD5; + self._createPages(container, blob, internalBuff, null /* stream */, rangeStart, rangeEnd, options, callback); + }); + } else { + self._createPages(container, blob, null /* text */, readStream, rangeStart, rangeEnd, options, callback); + } +}; + +/** +* Lists page ranges. 
Lists all of the page ranges by default, or only the page ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the page ranges information, see `[Range]{@link Range}` for detailed information. 
+* `response` will contain information related to this operation. +*/ +BlobService.prototype.listPageRanges = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listPageRanges', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'pagelist') + .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + if (options.rangeStart && options.rangeStart % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_START_OFFSET); + } + + if (options.rangeEnd && (options.rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_END_OFFSET); + } + + BlobResult.setHeadersFromBlob(webResource, options); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.pageRanges = null; + if (!responseObject.error) { + responseObject.pageRanges = []; + + var pageRanges = []; + if (responseObject.response.body.PageList.PageRange) { + pageRanges = responseObject.response.body.PageList.PageRange; + + if (!_.isArray(pageRanges)) { + pageRanges = [pageRanges]; + } + } + + pageRanges.forEach(function (pageRange) { + var range = { + start: parseInt(pageRange.Start, 10), + end: parseInt(pageRange.End, 10) + }; + + responseObject.pageRanges.push(range); + }); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.pageRanges, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, 
options, processResponseCallback); +}; + +/** +* Gets page ranges that have been updated or cleared since the snapshot specified by `previousSnapshotTime` was taken. Gets all of the page ranges by default, or only the page ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} previousSnapshotTime The previous snapshot time for comparison. Must be prior to `options.snapshotId` if it's provided. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain
+* the page ranges diff information, see `[RangeDiff]{@link RangeDiff}` for detailed information.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.getPageRangesDiff = function (container, blob, previousSnapshotTime, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getPageRangesDiff', function (v) {
+    v.string(container, 'container');
+    v.string(blob, 'blob');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+
+  var resourceName = createResourceName(container, blob);
+  var webResource = WebResource.get(resourceName)
+    .withQueryOption(QueryStringConstants.COMP, 'pagelist')
+    .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId)
+    .withQueryOption(QueryStringConstants.PREV_SNAPSHOT, previousSnapshotTime);
+
+  if (options.rangeStart && options.rangeStart % BlobConstants.PAGE_SIZE !== 0) {
+    throw new RangeError(SR.INVALID_PAGE_START_OFFSET);
+  }
+
+  if (options.rangeEnd && (options.rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) {
+    throw new RangeError(SR.INVALID_PAGE_END_OFFSET);
+  }
+
+  BlobResult.setHeadersFromBlob(webResource, options);
+
+  options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.pageRangesDiff = null;
+    if (!responseObject.error) {
+      
responseObject.pageRangesDiff = []; + + if (responseObject.response.body.PageList.PageRange) { + var updatedPageRanges = responseObject.response.body.PageList.PageRange; + + if (!_.isArray(updatedPageRanges)) { + updatedPageRanges = [updatedPageRanges]; + } + + updatedPageRanges.forEach(function (pageRange) { + var range = { + start: parseInt(pageRange.Start, 10), + end: parseInt(pageRange.End, 10), + isCleared: false + }; + + responseObject.pageRangesDiff.push(range); + }); + } + + if (responseObject.response.body.PageList.ClearRange) { + var clearedPageRanges = responseObject.response.body.PageList.ClearRange; + + if (!_.isArray(clearedPageRanges)) { + clearedPageRanges = [clearedPageRanges]; + } + + clearedPageRanges.forEach(function (pageRange) { + var range = { + start: parseInt(pageRange.Start, 10), + end: parseInt(pageRange.End, 10), + isCleared: true + }; + + responseObject.pageRangesDiff.push(range); + }); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.pageRangesDiff, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Clears a range of pages. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.clearPageRange = function (container, blob, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('clearPageRange', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var request = this._updatePageBlobPagesImpl(container, blob, rangeStart, rangeEnd, BlobConstants.PageWriteOptions.CLEAR, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + self.performRequest(request, null, options, processResponseCallback); +}; + +/** +* Resizes a page blob. 
+* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {String} size The size of the page blob, in bytes. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The blob lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the page information. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.resizePageBlob = function (container, blob, size, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('resizePageBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (size && size % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_BLOB_LENGTH); + } + + webResource.withHeader(HeaderConstants.BLOB_CONTENT_LENGTH, size); + + this._setBlobPropertiesHelper({ + webResource: webResource, + options: options, + container: container, + blob: blob, + callback: callback + }); + +}; + +/** +* Sets the page blob's sequence number. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {SequenceNumberAction} sequenceNumberAction A value indicating the operation to perform on the sequence number. +* The allowed values are defined in azure.BlobUtilities.SequenceNumberAction. +* @param {string} sequenceNumber The sequence number. The value of the sequence number must be between 0 and 2^63 - 1. +* Set this parameter to null if this operation is an increment action. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the page information. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.setPageBlobSequenceNumber = function (container, blob, sequenceNumberAction, sequenceNumber, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setPageBlobSequenceNumber', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + if (sequenceNumberAction === BlobUtilities.SequenceNumberAction.INCREMENT) { + if (!azureutil.objectIsNull(sequenceNumber)) { + throw new ArgumentError('sequenceNumber', SR.BLOB_INVALID_SEQUENCE_NUMBER); + } + } else { + if (azureutil.objectIsNull(sequenceNumber)) { + throw new ArgumentNullError('sequenceNumber', util.format(SR.ARGUMENT_NULL_OR_EMPTY, 'sequenceNumber')); + } + } + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withHeader(HeaderConstants.SEQUENCE_NUMBER_ACTION, sequenceNumberAction); + + if (sequenceNumberAction !== BlobUtilities.SequenceNumberAction.INCREMENT) { + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER, sequenceNumber); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Block blob methods + +/** +* Uploads a block blob from a stream. 
If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. 
+* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createBlockBlobFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) { + return this._createBlobFromStream(container, blob, BlobConstants.BlobTypes.BLOCK, stream, streamLength, optionsOrCallback, callback); +}; + +/** +* Uploads a block blob from a text string. 
If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|object} text The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createBlockBlobFromText = function (container, blob, text, optionsOrCallback, callback) { + return this._createBlobFromText(container, blob, BlobConstants.BlobTypes.BLOCK, text, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a block blob. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. 
+* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToBlockBlob(containerName, blobName, { blockIdPrefix: 'block' })); +*/ +BlobService.prototype.createWriteStreamToBlockBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.BLOCK, 0, false, optionsOrCallback, callback); +}; + +/** +* Creates a new block to be committed as part of a blob. +* +* @this {BlobService} +* @param {string} blockId The block identifier. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Readable} readStream The Node.js Readable stream. 
+* @param {int} streamLength The stream length. +* @param {object} [options] The request options. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.createBlockFromStream = function (blockId, container, blob, readStream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlockFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.exists(readStream, 'readStream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if (streamLength > BlobConstants.MAX_BLOCK_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_STREAM_LENGTH); + } else { + this._createBlock(blockId, container, blob, null, readStream, streamLength, options, callback); + } +}; + +/** +* Creates a new block to be committed as part of a blob. +* +* @this {BlobService} +* @param {string} blockId The block identifier. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|buffer} content The block content. +* @param {object} [options] The request options. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createBlockFromText = function (blockId, container, blob, content, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlockFromText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var contentLength = (Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content)); + + if (contentLength > BlobConstants.MAX_BLOCK_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_TEXT_LENGTH); + } else { + this._createBlock(blockId, container, blob, content, null, contentLength, options, callback); + } +}; + +/** +* Creates a new block to be committed as part of a blob from an URL of an Azure blob or file. +* +* @this {BlobService} +* @param {string} blockId The block identifier. 
+* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} sourceURL The URL of the source data. +* It can point to any Azure Blob or File, that is either public or has a shared access signature attached. +* @param {int} sourceRangeStart The start of the range of bytes(inclusive) that has to be taken from the copy source. +* @param {int} sourceRangeEnd The end of the range of bytes(inclusive) that has to be taken from the copy source. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createBlockFromURL = function (blockId, container, blob, sourceURL, sourceRangeStart, sourceRangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlockFromURL', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.string(sourceURL, 'sourceURL'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'block') + .withQueryOption(QueryStringConstants.BLOCK_ID, Buffer.from(blockId).toString('base64')) + .withHeader(HeaderConstants.COPY_SOURCE, sourceURL); + + options.sourceRangeStart = sourceRangeStart; + options.sourceRangeEnd = sourceRangeEnd; + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a new block to be committed as part of a block blob. +* @ignore +* +* @this {BlobService} +* @param {string} blockId The block identifier. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|buffer} content The block content. +* @param {Stream} stream The stream to the data to store. +* @param {int} length The length of the stream or text to upload. +* @param {object} [options] The request options. 
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
*/
BlobService.prototype._createBlock = function (blockId, container, blob, content, stream, length, options, callback) {
  var resourceName = createResourceName(container, blob);

  var self = this;
  // Issues the Put Block request. Deferred into a closure because the MD5 branch
  // below may need to finish an async checksum first; it reads content/stream/length
  // from the enclosing scope, which that branch may have rewritten by then.
  var startCreateBlock = function () {
    var webResource = WebResource.put(resourceName)
      .withQueryOption(QueryStringConstants.COMP, 'block')
      // Block IDs are transmitted base64-encoded on the wire.
      .withQueryOption(QueryStringConstants.BLOCK_ID, Buffer.from(blockId).toString('base64'))
      .withHeader(HeaderConstants.CONTENT_LENGTH, length);

    BlobResult.setHeadersFromBlob(webResource, options);

    var processResponseCallback = function (responseObject, next) {
      var finalCallback = function (returnObject) {
        callback(returnObject.error, returnObject.response);
      };

      next(responseObject, finalCallback);
    };

    // In-memory content goes through performRequest; otherwise the stream is piped.
    if (!azureutil.objectIsNull(content)) {
      self.performRequest(webResource, content, options, processResponseCallback);
    } else {
      self.performRequestOutputStream(webResource, stream, options, processResponseCallback);
    }
  };

  // Compute a transactional MD5 only when requested and not already supplied.
  if (azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) {
    if (!azureutil.objectIsNull(content)) {
      // Content is already in memory: hash synchronously, then send.
      options.transactionalContentMD5 = azureutil.getContentMd5(content);
      startCreateBlock();
    } else {
      // Stream input: buffer it while hashing, then send the buffered bytes
      // (content/length are replaced so startCreateBlock takes the in-memory path).
      azureutil.calculateMD5(stream, length, options, function (internalBuff, contentMD5) {
        options.transactionalContentMD5 = contentMD5;
        content = internalBuff;
        length = internalBuff.length;
        startCreateBlock();
      });
    }
  } else {
    startCreateBlock();
  }
};

/**
* Writes a blob by specifying the list of block IDs that make up the blob.
* In order to be written as part of a blob, a block must have been successfully written to the server in a prior
* createBlock operation.
* Note: If no valid list is specified in the blockList parameter, blob would be updated with empty content,
* i.e. existing blocks in the blob will be removed, this behavior is kept for backward compatibility consideration.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {object} blockList The wrapper for block ID list contains block IDs that make up the blob.
* Three kinds of list are provided, please choose one to use according to requirement.
* For more background knowledge, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list
* @param {string[]} [blockList.LatestBlocks] The list contains block IDs that make up the blob sequentially.
* All the block IDs in this list will be specified within Latest element.
* Choosing this list to contain block IDs indicates that the Blob service should first search
* the uncommitted block list, and then the committed block list for the named block.
* @param {string[]} [blockList.CommittedBlocks] The list contains block IDs that make up the blob sequentially.
* All the block IDs in this list will be specified within Committed element.
* Choosing this list to contain block IDs indicates that the Blob service should only search
* the committed block list for the named block.
* @param {string[]} [blockList.UncommittedBlocks] The list contains block IDs that make up the blob sequentially.
* All the block IDs in this list will be specified within Uncommitted element.
* Choosing this list to contain block IDs indicates that the Blob service should only search
* the uncommitted block list for the named block.
* @param {object} [options] The request options.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {string} [options.leaseId] The target blob lease identifier.
* @param {object} [options.contentSettings] The content settings of the blob.
* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResult} callback `error` will contain information
* if an error occurs; otherwise `[result]{@link BlobResult}` will contain
* the blob information.
* `response` will contain information related to this operation.
* @example
* var azure = require('azure-storage');
* var blobService = azure.createBlobService();
* blobService.createBlockFromText("sampleBlockName", containerName, blobName, "sampleBlockContent", function(error) {
* assert.equal(error, null);
* // In this example, LatestBlocks is used, we hope the Blob service first search
* // the uncommitted block list, and then the committed block list for the named block "sampleBlockName",
* // and thus make sure the block is with latest content.
* blobService.commitBlocks(containerName, blobName, { LatestBlocks: ["sampleBlockName"] }, function(error) {
* assert.equal(error, null);
* });
* });
*
*/
BlobService.prototype.commitBlocks = function (container, blob, blockList, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('commitBlocks', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.object(blockList, 'blockList');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  // Serialize the block list wrapper into the Put Block List XML request body.
  var blockListXml = BlockListResult.serialize(blockList);

  var resourceName = createResourceName(container, blob);
  var options = extend(true, {}, userOptions);
  var webResource = WebResource.put(resourceName)
    .withQueryOption(QueryStringConstants.COMP, 'blocklist')
    .withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(blockListXml))
    .withBody(blockListXml);

  BlobResult.setPropertiesFromBlob(webResource, options);

  var processResponseCallback = function (responseObject, next) {
    responseObject.blobResult = new BlobResult(container, blob);
    responseObject.blobResult.list = null;
    if (!responseObject.error) {
      responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers);
      // Echo the committed block list back to the caller on the result object.
      responseObject.blobResult.list = blockList;
    }

    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.blobResult, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, webResource.body, options, processResponseCallback);
};

/**
* Retrieves the list of blocks that have been uploaded as part of a block blob.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {BlockListFilter} blocklisttype The type of block list to retrieve.
* @param {object} [options] The request options.
* @param {string} [options.snapshotId] The source blob snapshot identifier.
* @param {string} [options.leaseId] The target blob lease identifier.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResult} callback `error` will contain information
* if an error occurs; otherwise `result` will contain
* the blocklist information.
* `response` will contain information related to this operation.
*/
BlobService.prototype.listBlocks = function (container, blob, blocklisttype, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('listBlocks', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var resourceName = createResourceName(container, blob);
  var options = extend(true, {}, userOptions);
  var webResource = WebResource.get(resourceName)
    .withQueryOption(QueryStringConstants.COMP, 'blocklist')
    .withQueryOption(QueryStringConstants.BLOCK_LIST_TYPE, blocklisttype)
    .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId);

  // Read-only operation: allow it to be served from the secondary location as well.
  options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;

  var processResponseCallback = function (responseObject, next) {
    responseObject.blockListResult = null;
    if (!responseObject.error) {
      responseObject.blockListResult = BlockListResult.parse(responseObject.response.body.BlockList);
    }

    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.blockListResult, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};

/**
* Generate a random block id prefix
* @return {string} An 8-character, zero-padded hexadecimal string.
*/
BlobService.prototype.generateBlockIdPrefix = function () {
  // NOTE(review): Math.random is not cryptographically secure; the prefix only needs
  // to be distinct enough to group this upload's block IDs, which appears intentional.
  var prefix = Math.floor(Math.random() * 0x100000000).toString(16);
  return azureutil.zeroPaddingString(prefix, 8);
};

/**
* Get a block id according to prefix and block number
* @param {string} prefix The block id prefix (see generateBlockIdPrefix).
* @param {int} number The sequential block number; zero-padded to 6 digits.
* @return {string} The block id, e.g. '<prefix>-000042'.
*/
BlobService.prototype.getBlockId = function (prefix, number) {
  return prefix + '-' + azureutil.zeroPaddingString(number, 6);
};

// Append blob methods

/**
* Creates an empty append blob. If the blob already exists on the service, it will be overwritten.
* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {object} [options] The request options.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {string} [options.leaseId] The target blob lease identifier.
* @param {object} [options.contentSettings] The content settings of the blob.
* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResponse} callback `error` will contain information
* if an error occurs; otherwise
* `response` will contain information related to this operation.
*/
BlobService.prototype.createOrReplaceAppendBlob = function (container, blob, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('createOrReplaceAppendBlob', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);

  var resourceName = createResourceName(container, blob);

  // An append blob is created empty (Content-Length 0); data is added later via append operations.
  var webResource = WebResource.put(resourceName)
    .withHeader(HeaderConstants.BLOB_TYPE, BlobConstants.BlobTypes.APPEND)
    .withHeader(HeaderConstants.LEASE_ID, options.leaseId)
    .withHeader(HeaderConstants.CONTENT_LENGTH, 0);

  BlobResult.setHeadersFromBlob(webResource, options);

  var processResponseCallback = function (responseObject, next) {
    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};

/**
* Uploads an append blob from a stream. If the blob already exists on the service, it will be overwritten.
* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object.
* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
* If you want to append data to an already existing blob, please look at appendFromStream.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {Stream} stream Stream to the data to store.
* @param {int} streamLength The length of the stream to upload.
* @param {object} [options] The request options.
* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
* @param {SpeedSummary} [options.speedSummary] The upload tracker objects.
* @param {string} [options.leaseId] The lease identifier.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
* @param {object} [options.contentSettings] The content settings of the blob.
* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResult} callback `error` will contain information
* if an error occurs; otherwise `[result]{@link BlobResult}` will contain
* the blob information.
* `response` will contain information related to this operation.
* @return {SpeedSummary}
*/
BlobService.prototype.createAppendBlobFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) {
  // Thin wrapper: the shared stream-upload path handles chunking, MD5 and retries.
  return this._createBlobFromStream(container, blob, BlobConstants.BlobTypes.APPEND, stream, streamLength, optionsOrCallback, callback);
};

/**
* Uploads an append blob from a text string. If the blob already exists on the service, it will be overwritten.
+* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If you want to append data to an already existing blob, please look at appendFromText. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|object} text The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. 
+* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createAppendBlobFromText = function (container, blob, text, optionsOrCallback, callback) { + return this._createBlobFromText(container, blob, BlobConstants.BlobTypes.APPEND, text, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a new append blob. If the blob already exists on the service, it will be overwritten. 
+* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback The callback function. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToAppendBlob(containerName, blobName)); +*/ +BlobService.prototype.createWriteStreamToNewAppendBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.APPEND, 0, true, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to an existing append blob. Assumes that the blob exists. 
* If it does not, please create the blob using createAppendBlob before calling this method or use createWriteStreamToNewAppendBlob.
* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
* Please note the `Stream` returned by this API should be used with piping.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {object} [options] The request options.
* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
* @param {string} [options.leaseId] The lease identifier.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads.
* The default value is false for page blobs and true for block blobs.
* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
* @param {object} [options.contentSettings] The content settings of the blob.
* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResponse} callback The callback function.
* @return {Writable} A Node.js Writable stream.
* @example
* var azure = require('azure-storage');
* var blobService = azure.createBlobService();
* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToExistingAppendBlob(containerName, blobName));
*/
BlobService.prototype.createWriteStreamToExistingAppendBlob = function (container, blob, optionsOrCallback, callback) {
  // Fifth argument false — presumably "create new blob"; here the blob must already
  // exist (cf. createWriteStreamToNewAppendBlob, which passes true). Confirm against _createWriteStreamToBlob.
  return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.APPEND, 0, false, optionsOrCallback, callback);
};

/**
* Appends to an append blob from a stream. Assumes the blob already exists on the service.
* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {Stream} stream Stream to the data to store.
* @param {int} streamLength The length of the stream to upload.
* @param {object} [options] The request options.
* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
* @param {SpeedSummary} [options.speedSummary] The upload tracker objects.
* @param {string} [options.leaseId] The lease identifier.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
* @param {object} [options.contentSettings] The content settings of the blob.
* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResult} callback `error` will contain information
* if an error occurs; otherwise `[result]{@link BlobResult}` will contain
* the blob information.
* `response` will contain information related to this operation.
* @return {SpeedSummary}
*/
BlobService.prototype.appendFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) {
  var options;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; });

  validate.validateArgs('appendFromStream', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.exists(stream, 'stream');
    v.value(streamLength, 'streamLength');
    v.callback(callback);
  });

  // First argument false — presumably "create new blob"; the blob must already exist
  // per the contract above. Confirm against _uploadBlobFromStream.
  return this._uploadBlobFromStream(false, container, blob, BlobConstants.BlobTypes.APPEND, stream, streamLength, options, callback);
};

/**
* Appends to an append blob from a text string. Assumes the blob already exists on the service.
* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
*
* @this {BlobService}
* @param {string} container The container name.
* @param {string} blob The blob name.
* @param {string|object} text The blob text, as a string or in a Buffer.
* @param {object} [options] The request options.
* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
* @param {string} [options.leaseId] The lease identifier.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
* @param {object} [options.contentSettings] The content settings of the blob.
* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
* @param {AccessConditions} [options.accessConditions] The access conditions.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResult} callback `error` will contain information
* if an error occurs; otherwise `[result]{@link BlobResult}` will contain
* the blob information.
* `response` will contain information related to this operation.
*/
BlobService.prototype.appendFromText = function (container, blob, text, optionsOrCallback, callback) {
  // First argument false — presumably "create new blob"; the blob must already exist
  // per the contract above. Confirm against _uploadBlobFromText.
  return this._uploadBlobFromText(false, container, blob, BlobConstants.BlobTypes.APPEND, text, optionsOrCallback, callback);
};


/**
* Creates a new block from a read stream to be appended to an append blob.
* If the sequence of data to be appended is important, please use this API strictly in a single writer.
* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
+* If the sequence of data to be appended is not important, this API can be used in parallel, +* in this case, options.appendPosition can be left without settings. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} streamLength The stream length. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. +* @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.appendBlockFromStream = function (container, blob, readStream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendBlockFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.exists(readStream, 'readStream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if (streamLength > BlobConstants.MAX_APPEND_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_STREAM_LENGTH); + } else { + this._appendBlock(container, blob, null, readStream, streamLength, options, callback); + } +}; + +/** +* Creates a new block from a text to be appended to an append blob. +* If the sequence of data to be appended is important, please use this API strictly in a single writer. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If the sequence of data to be appended is not important, this API can be used in parallel, +* in this case, options.appendPosition can be left without settings. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. 
+* @param {string|object} content The block text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. +* @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.appendBlockFromText = function (container, blob, content, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendBlockFromText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var contentLength = (Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content)); + if (contentLength > BlobConstants.MAX_APPEND_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_TEXT_LENGTH); + } else { + this._appendBlock(container, blob, content, null, contentLength, options, callback); + } +}; + +// Private methods + +/** +* Creates a new blob from a stream. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. 
+* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* @return {SpeedSummary} +*/ +BlobService.prototype._createBlobFromStream = function (container, blob, blobType, stream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.exists(stream, 'stream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + self._uploadBlobFromStream(true, container, blob, blobType, stream, streamLength, options, callback); + } + }; + + this._createBlob(container, blob, blobType, streamLength, options, creationCallback); + + return options.speedSummary; +}; + +/** +* Uploads a block blob or an append blob from a text string. 
If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {string|buffer} content The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._createBlobFromText = function (container, blob, blobType, content, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + self._uploadBlobFromText(true, container, blob, blobType, content, options, callback); + } + }; + + var contentLength = azureutil.objectIsNull(content) ? 0 : ((Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content))); + this._createBlob(container, blob, blobType, contentLength, options, creationCallback); + + return options.speedSummary; +}; + +/** +* Provides a stream to write to a block blob or an append blob. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {int} length The blob length. +* @param {bool} createNewBlob Specifies whether create a new blob. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.blockSize] The size of each block. Maximum is 100MB. (For block blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. 
This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback The callback function. +* @return {Writable} A Node.js Writable stream. +*/ +BlobService.prototype._createWriteStreamToBlob = function (container, blob, blobType, length, createNewBlob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createWriteStreamToBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + }); + + var options = extend(true, {}, userOptions); + + var sizeLimitation; + if (blobType === BlobConstants.BlobTypes.BLOCK) { + // default to true, unless explicitly set to false + options.storeBlobContentMD5 = options.storeBlobContentMD5 === false ? 
false : true; + sizeLimitation = options.blockSize || BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + } else if (blobType == BlobConstants.BlobTypes.PAGE) { + sizeLimitation = BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES; + } else if (blobType == BlobConstants.BlobTypes.APPEND) { + sizeLimitation = BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + } + + var stream = new ChunkStream({ calcContentMd5: options.storeBlobContentMD5 }); + stream._highWaterMark = sizeLimitation; + + stream.pause(); //Immediately pause the stream in order to wait for the destination to getting ready + + var self = this; + var createCallback = function (createError, createBlob, createResponse) { + if (createError) { + if (callback) { + callback(createError, createBlob, createResponse); + } + } else { + self._uploadBlobFromStream(createNewBlob, container, blob, blobType, stream, null, options, function (error, blob, response) { + if (error) { + stream.emit('error', error); + } + + if (callback) { + callback(error, blob, response); + } + }); + } + }; + + if (createNewBlob === true) { + this._createBlob(container, blob, blobType, length, options, createCallback); + } else { + createCallback(); + } + + return stream; +}; + +/** +* Upload blob content from a stream. Assumes the blob already exists. +* +* @ignore +* +* @this {BlobService} +* @param {bool} isNewBlob Specifies whether the blob is newly created. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. 
(For append blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* @return {SpeedSummary} +*/ +BlobService.prototype._uploadBlobFromStream = function (isNewBlob, container, blob, blobType, stream, streamLength, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + if (blobType === BlobConstants.BlobTypes.BLOCK) { + // default to true, unless explicitly set to false + options.storeBlobContentMD5 = options.storeBlobContentMD5 === false ? 
false : true; + } + + stream.pause(); + + var self = this; + var startUpload = function () { + var putBlockBlobFromStream = function () { + if (streamLength > 0 && azureutil.objectIsNull(azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null)) && options.storeBlobContentMD5) { + azureutil.calculateMD5(stream, Math.min(self.singleBlobPutThresholdInBytes, streamLength), options, function (internalBuff, contentMD5) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], contentMD5); + self._putBlockBlob(container, blob, internalBuff, null, internalBuff.length, options, callback); + }); + stream.resume(); + } else { + // Stream will resume when it has a pipe destination or a 'data' listener + self._putBlockBlob(container, blob, null, stream, streamLength, options, callback); + } + }; + + if (streamLength === null || streamLength >= self.singleBlobPutThresholdInBytes || blobType !== BlobConstants.BlobTypes.BLOCK) { + var chunkStream = new ChunkStreamWithStream(stream, { calcContentMd5: options.storeBlobContentMD5 }); + self._uploadContentFromChunkStream(container, blob, blobType, chunkStream, streamLength, options, callback); + } else { + putBlockBlobFromStream(); + } + }; + + if (!isNewBlob) { + if (options.storeBlobContentMD5 && blobType !== BlobConstants.BlobTypes.BLOCK) { + throw new Error(SR.MD5_NOT_POSSIBLE); + } + + if (blobType === BlobConstants.BlobTypes.APPEND || options.accessConditions) { + // Do a getBlobProperties right at the beginning for existing blobs and use the user passed in access conditions. + // So any pre-condition failure on the first block (in a strictly single writer scenario) is caught. + // This call also helps us get the append position to append to if the user hasn’t specified an access condition. 
+ this.getBlobProperties(container, blob, options, function (error, properties, response) { + if (error && !(options.accessConditions && options.accessConditions.EtagNonMatch === '*' && response.statusCode === 400)) { + callback(error); + } else { + if (blobType === BlobConstants.BlobTypes.APPEND) { + options.appendPosition = properties.contentLength; + } + + startUpload(); + } + }); + } else { + startUpload(); + } + } else { + startUpload(); + } + + return options.speedSummary; +}; + +/** +* Upload blob content from a text. Assumes the blob already exists. +* +* @ignore +* +* @this {BlobService} +* @param {bool} isNewBlob Specifies whether the blob is newly created. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param (string) content The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. 
+* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. 
+* @return {SpeedSummary} +*/ +BlobService.prototype._uploadBlobFromText = function (isNewBlob, container, blob, blobType, content, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + options[HeaderConstants.CONTENT_TYPE] = (options.contentSettings && options.contentSettings.contentType) || 'text/plain;charset="utf-8"'; + + var self = this; + var startUpload = function () { + var operationFunc; + var length = azureutil.objectIsNull(content) ? 0 : (Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content)); + + if (blobType === BlobConstants.BlobTypes.BLOCK) { + // default to true, unless explicitly set to false + options.storeBlobContentMD5 = options.storeBlobContentMD5 === false ? false : true; + operationFunc = self._putBlockBlob; + + if (length > BlobConstants.MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES) { + throw new RangeError(SR.INVALID_BLOB_LENGTH); + } + } else if (blobType === BlobConstants.BlobTypes.APPEND) { + operationFunc = self._appendBlock; + + if (length > BlobConstants.MAX_APPEND_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_TEXT_LENGTH); + } + } + + var finalCallback = function (error, blobResult, response) { + if (blobType !== BlobConstants.BlobTypes.BLOCK) { + self.setBlobProperties(container, blob, options.contentSettings, options, function (error, blob, response) { + blob = extend(false, blob, blobResult); + callback(error, blob, response); + }); + } else { + callback(error, blobResult, response); + } + }; + + operationFunc.call(self, container, blob, content, null, length, options, finalCallback); + }; + + if (!isNewBlob) { + if (options.storeBlobContentMD5 && blobType !== BlobConstants.BlobTypes.BLOCK) { + throw new Error(SR.MD5_NOT_POSSIBLE); + } + + if (blobType === BlobConstants.BlobTypes.APPEND || options.accessConditions) { + // Do a getBlobProperties 
right at the beginning for existing blobs and use the user passed in access conditions. + // So any pre-condition failure on the first block (in a strictly single writer scenario) is caught. + // This call also helps us get the append position to append to if the user hasn’t specified an access condition. + this.getBlobProperties(container, blob, options, function (error, properties) { + if (error) { + callback(error); + } else { + if (blobType === BlobConstants.BlobTypes.APPEND) { + options.appendPosition = properties.contentLength; + } + + startUpload(); + } + }); + } + } else { + if (!azureutil.objectIsNull(content) && azureutil.objectIsNull(azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null)) && options.storeBlobContentMD5) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.getContentMd5(content)); + } + startUpload(); + } +}; + +/** +* Uploads a block blob from a stream. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} text The blob text. +* @param (Stream) stream Stream to the data to store. +* @param {int} length The length of the stream or text to upload. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. 
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._putBlockBlob = function (container, blob, text, stream, length, options, callback) { + if (!options.speedSummary) { + options.speedSummary = new SpeedSummary(blob); + } + + var speedSummary = options.speedSummary; + speedSummary.totalSize = length; + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/octet-stream') + .withHeader(HeaderConstants.BLOB_TYPE, BlobConstants.BlobTypes.BLOCK) + .withHeader(HeaderConstants.CONTENT_LENGTH, length); + + if (!azureutil.objectIsNull(text) && azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + options.transactionalContentMD5 = azureutil.getContentMd5(text); + } + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + if (options.metadata) { + responseObject.blobResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + if (!returnObject || !returnObject.error) { + speedSummary.increment(length); + } + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if (!azureutil.objectIsNull(text)) { + this.performRequest(webResource, text, options, processResponseCallback); + } else { + this.performRequestOutputStream(webResource, stream, options, processResponseCallback); + } + + return options.speedSummary; +}; + +/** +* Appends a new block to an append blob. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|buffer} content The block content. 
* @param {Stream}             stream                                        The stream to the data to store.
* @param {int}                length                                        The length of the stream or content to upload.
* @param {object}             [options]                                     The request options.
* @param {bool}               [options.absorbConditionalErrorsOnRetry]      Specifies whether to absorb the conditional error on retry.
* @param {int}                [options.maxBlobSize]                         The max length in bytes allowed for the append blob to grow to.
* @param {int}                [options.appendPosition]                      The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number.
* @param {string}             [options.leaseId]                             The target blob lease identifier.
* @param {string}             [options.transactionalContentMD5]             The blob's MD5 hash. This hash is used to verify the integrity of the blob during transport.
* @param {bool}               [options.useTransactionalMD5]                 Calculate and send/validate content MD5 for transactions.
* @param {AccessConditions}   [options.accessConditions]                    The access conditions.
* @param {LocationMode}       [options.locationMode]                        Specifies the location mode used to decide which location the request should be sent to.
*                                                                           Please see StorageUtilities.LocationMode for the possible values.
* @param {int}                [options.timeoutIntervalInMs]                 The server timeout interval, in milliseconds, to use for the request.
* @param {int}                [options.clientRequestTimeoutInMs]            The timeout of client requests, in milliseconds, to use for the request.
* @param {int}                [options.maximumExecutionTimeInMs]            The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                                           The maximum execution time interval begins at the time that the client begins building the request. The maximum
*                                                                           execution time is checked intermittently while performing requests, and before executing retries.
* @param {string}             [options.clientRequestId]                     A string that represents the client request ID with a 1KB character limit.
* @param {bool}               [options.useNagleAlgorithm]                   Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                                           The default value is false.
* @param {errorOrResponse}    callback                                      `error` will contain information if an error occurs; otherwise
*                                                                           `response` will contain information related to this operation.
*/
BlobService.prototype._appendBlock = function (container, blob, content, stream, length, options, callback) {
  // NOTE(review): when options.speedSummary is unset a fresh local summary is
  // used, but it is NOT written back to options, so the trailing
  // `return options.speedSummary` returns undefined in that case — confirm
  // whether callers rely on the return value here (cf. _putBlockBlob, which
  // assigns options.speedSummary before returning it).
  var speedSummary = options.speedSummary || new SpeedSummary(blob);
  speedSummary.totalSize = length;

  var self = this;
  // Issues the Append Block request once the transactional MD5 (if requested)
  // has been resolved below.
  var startAppendBlock = function () {
    var resourceName = createResourceName(container, blob);

    // maxBlobSize / appendPosition become the x-ms-blob-condition-* headers;
    // WebResource drops headers whose value is undefined, so absent options
    // simply omit the condition.
    var webResource = WebResource.put(resourceName)
      .withQueryOption(QueryStringConstants.COMP, 'appendblock')
      .withHeader(HeaderConstants.CONTENT_LENGTH, length)
      .withHeader(HeaderConstants.BLOB_CONDITION_MAX_SIZE, options.maxBlobSize)
      .withHeader(HeaderConstants.BLOB_CONDITION_APPEND_POSITION, options.appendPosition);

    BlobResult.setHeadersFromBlob(webResource, options);

    var processResponseCallback = function (responseObject, next) {
      responseObject.blobResult = null;
      if (!responseObject.error) {
        responseObject.blobResult = new BlobResult(container, blob);
        responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers);
      }

      var finalCallback = function (returnObject) {
        // Count the block toward upload progress only on success.
        if (!returnObject || !returnObject.error) {
          speedSummary.increment(length);
        }
        callback(returnObject.error, returnObject.blobResult, returnObject.response);
      };

      next(responseObject, finalCallback);
    };

    // In-memory content is sent directly; otherwise the payload is piped from
    // the supplied stream.
    if (!azureutil.objectIsNull(content)) {
      self.performRequest(webResource, content, options, processResponseCallback);
    } else {
      self.performRequestOutputStream(webResource, stream, options, processResponseCallback);
    }
  };

  if (azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) {
    if (!azureutil.objectIsNull(content)) {
      options.transactionalContentMD5 = azureutil.getContentMd5(content);
      startAppendBlock();
    } else {
      // Hashing a stream requires buffering it fully first; calculateMD5 hands
      // back the buffered bytes, which then replace the stream as the payload
      // (content/length are rebound to the buffer).
      azureutil.calculateMD5(stream, length, options, function (internalBuff, contentMD5) {
        options.transactionalContentMD5 = contentMD5;
        content = internalBuff;
        length = internalBuff.length;
        startAppendBlock();
      });
    }
  } else {
    startAppendBlock();
  }

  return options.speedSummary;
};

/**
* Creates and dispatches lease requests.
* @ignore
*
* @this {BlobService}
* @param {object}             webResource                                   The web resource.
* @param {string}             container                                     The container name.
* @param {string}             blob                                          The blob name.
* @param {string}             leaseId                                       The lease identifier. Required to renew, change or release the lease.
* @param {string}             leaseAction                                   The lease action (BlobConstants.LeaseOperation.*). Required.
* @param {object}             userOptions                                   The request options.
* @param {int}                [userOptions.leaseBreakPeriod]                The lease break period.
* @param {string}             [userOptions.leaseDuration]                   The lease duration. Default is never to expire.
* @param {string}             [userOptions.proposedLeaseId]                 The proposed lease identifier. This is required for the CHANGE lease action.
* @param {LocationMode}       [userOptions.locationMode]                    Specifies the location mode used to decide which location the request should be sent to.
*                                                                           Please see StorageUtilities.LocationMode for the possible values.
* @param {AccessConditions}   [options.accessConditions]                    The access conditions.
* @param {int}                [userOptions.timeoutIntervalInMs]             The server timeout interval, in milliseconds, to use for the request.
* @param {int}                [options.clientRequestTimeoutInMs]            The timeout of client requests, in milliseconds, to use for the request.
* @param {int}                [userOptions.maximumExecutionTimeInMs]        The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                                           The maximum execution time interval begins at the time that the client begins building the request.
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, lease, response)} callback `error` will contain information +* if an error occurs; otherwise `lease` will contain +* the lease information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype._leaseImpl = function (container, blob, leaseId, leaseAction, options, callback) { + var webResource; + if (!azureutil.objectIsNull(blob)) { + validate.validateArgs('_leaseImpl', function (v) { + v.string(blob, 'blob'); + }); + var resourceName = createResourceName(container, blob); + webResource = WebResource.put(resourceName); + } else { + webResource = WebResource.put(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container'); + } + + webResource.withQueryOption(QueryStringConstants.COMP, 'lease') + .withHeader(HeaderConstants.LEASE_ID, leaseId) + .withHeader(HeaderConstants.LEASE_ACTION, leaseAction.toLowerCase()) + .withHeader(HeaderConstants.LEASE_BREAK_PERIOD, options.leaseBreakPeriod) + .withHeader(HeaderConstants.PROPOSED_LEASE_ID, options.proposedLeaseId) + .withHeader(HeaderConstants.LEASE_DURATION, options.leaseDuration); + + var processResponseCallback = function (responseObject, next) { + responseObject.leaseResult = null; + if (!responseObject.error) { + responseObject.leaseResult = new LeaseResult(container, blob); + responseObject.leaseResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.leaseResult, returnObject.response); + }; + + next(responseObject, finalCallback); + 
}; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates a page blob from text. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} text The text string. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the page content. This hash is used to verify the integrity of the page during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
* @param {Function(error, pageBlob, response)} callback                     `error` will contain information if an error occurs; otherwise
*                                                                           `pageBlob` will contain the blob information.
*                                                                           `response` will contain information related to this operation.
*/
BlobService.prototype._createPages = function (container, blob, text, readStream, rangeStart, rangeEnd, options, callback) {
  var request = this._updatePageBlobPagesImpl(container, blob, rangeStart, rangeEnd, BlobConstants.PageWriteOptions.UPDATE, options);

  // At this point, we have already validated that the range is less than 4MB. Therefore, we just need to calculate the contentMD5 if required.
  // Even when this is called from the createPagesFromStream method, it is pre-buffered and called with text.
  if (!azureutil.objectIsNull(text) && azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) {
    request.withHeader(HeaderConstants.CONTENT_MD5, azureutil.getContentMd5(text));
  }

  var processResponseCallback = function (responseObject, next) {
    responseObject.blobResult = null;
    if (!responseObject.error) {
      responseObject.blobResult = new BlobResult(container, blob);
      responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers);
    }

    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.blobResult, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  // In-memory text goes out directly; otherwise the payload is piped from the
  // provided readable stream.
  if (!azureutil.objectIsNull(text)) {
    this.performRequest(request, text, options, processResponseCallback);
  } else {
    this.performRequestOutputStream(request, readStream, options, processResponseCallback);
  }
};

/**
* Builds the WebResource for a Put Page (comp=page) request, validating that
* the range is page-aligned (PAGE_SIZE boundaries) and recording the range and
* content length on `options` for header generation.
*
* NOTE(review): a rangeStart/rangeEnd of 0 skips its alignment check because 0
* is falsy — harmless for rangeStart (0 is aligned) but worth confirming the
* intent for rangeEnd.
* @ignore
*/
BlobService.prototype._updatePageBlobPagesImpl = function (container, blob, rangeStart, rangeEnd, writeMethod, options) {
  if (rangeStart && rangeStart % BlobConstants.PAGE_SIZE !== 0) {
    throw new RangeError(SR.INVALID_PAGE_START_OFFSET);
  }

  // Ranges are inclusive, so a valid end offset is one byte short of a page boundary.
  if (rangeEnd && (rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) {
    throw new RangeError(SR.INVALID_PAGE_END_OFFSET);
  }

  // this is necessary if this is called from _uploadContentFromChunkStream->_createPages
  if (!options) {
    options = {};
  }

  options.rangeStart = rangeStart;
  options.rangeEnd = rangeEnd;

  // CLEAR pages carry no body; UPDATE carries the inclusive range's byte count.
  options.contentLength = writeMethod === BlobConstants.PageWriteOptions.UPDATE ? (rangeEnd - rangeStart) + 1 : 0;

  var resourceName = createResourceName(container, blob);
  var webResource = WebResource.put(resourceName)
    .withQueryOption(QueryStringConstants.COMP, 'page')
    .withHeader(HeaderConstants.CONTENT_TYPE, 'application/octet-stream')
    .withHeader(HeaderConstants.PAGE_WRITE, writeMethod);

  BlobResult.setHeadersFromBlob(webResource, options);

  return webResource;
};

/**
* Uploads blob content from a stream.
* For block blob, it creates a new block to be committed.
* For page blob, it writes a range of pages.
* For append blob, it appends a new block.
*
* @ignore
*
* @this {BlobService}
* @param {string}             container                                     The container name.
* @param {string}             blob                                          The blob name.
* @param {string}             blobType                                      The blob type.
* @param {Stream}             stream                                        Stream to the data to store.
* @param {int}                streamLength                                  The length of the stream to upload.
* @param {object|function}    [options]                                     The request options.
* @param {SpeedSummary}       [options.speedSummary]                        The download tracker objects;
* @param {int}                [options.parallelOperationThreadCount]        The number of parallel operations that may be performed when uploading.
* @param {bool}               [options.absorbConditionalErrorsOnRetry]      Specifies whether to absorb the conditional error on retry. (For append blob only)
* @param {int}                [options.maxBlobSize]                         The max length in bytes allowed for the append blob to grow to.
* @param {int}                [options.appendPosition]                      The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number.
* @param {bool}               [options.useTransactionalMD5]                 Calculate and send/validate content MD5 for transactions.
* @param {string}             [options.blockIdPrefix]                       The prefix to be used to generate the block id. (For block blob only)
* @param {int}                [options.blockSize]                           The size of each block. Maximum is 100MB. (For block blob only)
* @param {string}             [options.leaseId]                             The lease identifier.
* @param {object}             [options.metadata]                            The metadata key/value pairs.
* @param {bool}               [options.storeBlobContentMD5]                 Specifies whether the blob's ContentMD5 header should be set on uploads.
* @param {object}             [options.contentSettings]                     The content settings of the blob.
* @param {string}             [options.contentSettings.contentType]         The MIME content type of the blob. The default type is application/octet-stream.
* @param {string}             [options.contentSettings.contentEncoding]     The content encodings that have been applied to the blob.
* @param {string}             [options.contentSettings.contentLanguage]     The natural languages used by this resource.
* @param {string}             [options.contentSettings.cacheControl]        The Blob service stores this value but does not use or modify it.
* @param {string}             [options.contentSettings.contentDisposition]  The blob's content disposition.
* @param {string}             [options.contentSettings.contentMD5]          The blob's MD5 hash.
* @param {AccessConditions}   [options.accessConditions]                    The access conditions.
* @param {LocationMode}       [options.locationMode]                        Specifies the location mode used to decide which location the request should be sent to.
*                                                                           Please see StorageUtilities.LocationMode for the possible values.
* @param {int}                [options.timeoutIntervalInMs]                 The server timeout interval, in milliseconds, to use for the request.
* @param {int}                [options.clientRequestTimeoutInMs]            The timeout of client requests, in milliseconds, to use for the request.
* @param {int}                [options.maximumExecutionTimeInMs]            The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                                           The maximum execution time interval begins at the time that the client begins building the request. The maximum
*                                                                           execution time is checked intermittently while performing requests, and before executing retries.
* @param {string}             [options.clientRequestId]                     A string that represents the client request ID with a 1KB character limit.
* @param {bool}               [options.useNagleAlgorithm]                   Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                                           The default value is false.
* @param {function(error, null)} callback                                   The callback function.
* @return {SpeedSummary}
*/
BlobService.prototype._uploadContentFromChunkStream = function (container, blob, blobType, chunkStream, streamLength, options, callback) {
  this.logger.debug(util.format('_uploadContentFromChunkStream for blob %s', blob));

  var apiName;
  var isBlockBlobUpload;
  var isPageBlobUpload;
  var isAppendBlobUpload;
  var sizeLimitation;
  // Remember a caller-supplied content MD5 so it can be restored after the
  // chunked upload completes (the chunk pipeline may compute its own).
  var originalContentMD5 = azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null);
  var parallelOperationThreadCount = options.parallelOperationThreadCount || this.parallelOperationThreadCount;

  // Pick the per-chunk REST operation and the chunk size cap for this blob type.
  if (blobType == BlobConstants.BlobTypes.BLOCK) {
    apiName = 'createBlockFromText';
    isBlockBlobUpload = true;

    // BlockBlob can only have 50000 blocks in maximum
    var minBlockSize = Math.ceil(streamLength / 50000);
    if (options.blockSize) {
      if (options.blockSize < minBlockSize) {
        // options.blockSize is less than the minBlockSize, error callback
        var error = new ArgumentError('options.blockSize', util.format('The minimum blockSize is %s and the provided blockSize %s is too small.', minBlockSize, options.blockSize));
        callback(error);
        return;
      } else {
        sizeLimitation = options.blockSize;
      }
    } else {
      // 4MB minimum for auto-calculated block size
      sizeLimitation = Math.max(minBlockSize, BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES);
    }
  } else if (blobType == BlobConstants.BlobTypes.PAGE) {
    apiName = '_createPages';
    isPageBlobUpload = true;
    sizeLimitation = BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES;
  } else if (blobType == BlobConstants.BlobTypes.APPEND) {
    apiName = 'appendBlockFromText';
    isAppendBlobUpload = true;
    // Append blocks must land in order, so concurrency is forced to 1.
    parallelOperationThreadCount = 1;
    sizeLimitation = BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES;
  } else {
    var error = new ArgumentError('blobType', util.format('Unknown blob type %s', blobType));
    callback(error);
    return;
  }

  chunkStream._highWaterMark = sizeLimitation;

  this._setOperationExpiryTime(options);

  // initialize the speed summary
  var speedSummary = options.speedSummary || new SpeedSummary(blob);
  speedSummary.totalSize = streamLength;

  // initialize chunk allocator (pooled buffers sized to the chunk limit)
  var allocator = new ChunkAllocator(sizeLimitation, parallelOperationThreadCount, { logger: this.logger });
  chunkStream.setMemoryAllocator(allocator);
  chunkStream.setOutputLength(streamLength);

  // if this is a FileReadStream, set the allocator on that stream
  if (chunkStream._stream && chunkStream._stream.setMemoryAllocator) {
    var fileReadStreamAllocator = new ChunkAllocator(chunkStream._stream._highWaterMark, parallelOperationThreadCount, { logger: this.logger });
    chunkStream._stream.setMemoryAllocator(fileReadStreamAllocator);
  }

  // initialize batch operations; append uploads also require in-order
  // dispatch and in-order callbacks.
  var batchOperations = new BatchOperation(apiName, {
    callInOrder: isAppendBlobUpload,
    callbackInOrder: isAppendBlobUpload,
    logger: this.logger,
    enableReuseSocket: this.defaultEnableReuseSocket,
    operationMemoryUsage: sizeLimitation
  });
  batchOperations.setConcurrency(parallelOperationThreadCount);

  // initialize options shared by every per-chunk request
  var rangeOptions = {
    leaseId: options.leaseId,
    timeoutIntervalInMs: options.timeoutIntervalInMs,
    clientRequestTimeoutInMs: options.clientRequestTimeoutInMs,
    operationExpiryTime: options.operationExpiryTime,
    maxBlobSize: options.maxBlobSize,
    appendPosition: options.appendPosition || 0,
    initialAppendPosition: options.appendPosition || 0,
    absorbConditionalErrorsOnRetry: options.absorbConditionalErrorsOnRetry
  };

  // initialize block blob variables
  var blockIdPrefix = options.blockIdPrefix || this.generateBlockIdPrefix();
  var blockCount = 0;
  var blockIds = [];
  var blobResult = {};

  var self = this;
  chunkStream.on('data', function (data, range) {
    var operation = null;
    var full = false;
    var autoIncrement = speedSummary.getAutoIncrementFunction(data.length);

    if (data.length > sizeLimitation) {
      throw new RangeError(util.format(SR.EXCEEDED_SIZE_LIMITATION, sizeLimitation, data.length));
    }

    if (options.useTransactionalMD5) {
      //calculate content md5 for the current uploading block data
      var contentMD5 = azureutil.getContentMd5(data);
      rangeOptions.transactionalContentMD5 = contentMD5;
    }

    // Clamp the chunk against streamLength: stop entirely past the limit,
    // or clip the final chunk (recomputing its MD5 if needed).
    var checkLengthLimit = function () {
      if (!streamLength) return true;
      if (range.start >= streamLength) {
        self.logger.debug(util.format('Stop uploading data from %s bytes to %s bytes to blob %s because of limit %s', range.start, range.end, blob, streamLength));
        chunkStream.stop();
        return false;
      } else if (range.end >= streamLength) {
        self.logger.debug(util.format('Clip uploading data from %s bytes to %s bytes to blob %s because of limit %s', range.start, range.end, blob, streamLength));
        range.end = streamLength - 1;
        data = data.slice(0, streamLength - range.start);
        if (options.useTransactionalMD5) {
          rangeOptions.transactionalContentMD5 = azureutil.getContentMd5(data);
        }
      }
      return true;
    };

    var uploadBlockBlobChunk = function () {
      if (!checkLengthLimit()) return;
      var blockId = self.getBlockId(blockIdPrefix, blockCount);
      blockIds.push(blockId);

      operation = new BatchOperation.RestOperation(self, apiName, blockId, container, blob, data, rangeOptions, function (error) {
        if (!error) {
          autoIncrement();
        } else {
          self.logger.debug(util.format('Stop uploading data as error happens. Error: %s', util.inspect(error)));
          chunkStream.stop();
        }
        // Return the pooled buffer regardless of outcome.
        allocator.releaseBuffer(data);
        data = null;
      });

      blockCount++;
    };

    var uploadPageBlobChunk = function () {
      if (!checkLengthLimit()) return;

      // All-zero pages need no write — page blobs read as zero by default.
      if (azureutil.isBufferAllZero(data)) {
        self.logger.debug(util.format('Skip upload data from %s bytes to %s bytes to blob %s', range.start, range.end, blob));
        speedSummary.increment(data.length);
      } else {
        self.logger.debug(util.format('Upload data from %s bytes to %s bytes to blob %s', range.start, range.end, blob));
        operation = new BatchOperation.RestOperation(self, apiName, container, blob, data, null, range.start, range.end, rangeOptions, function (error) {
          if (!error) {
            autoIncrement();
          } else {
            self.logger.debug(util.format('Stop uploading data as error happens. Error: %s', util.inspect(error)));
            chunkStream.stop();
          }
          allocator.releaseBuffer(data);
          data = null;
        });
      }
    };

    var uploadAppendBlobChunk = function () {
      if (!checkLengthLimit()) return;

      rangeOptions.appendPosition = Number(rangeOptions.initialAppendPosition) + Number(range.start);

      // We cannot differentiate between max size condition failing only in the retry versus failing in the first attempt and retry.
      // So we will eliminate the latter and handle the former in the append operation callback.
      if (options.maxBlobSize && rangeOptions.appendPosition + data.length > options.maxBlobSize) {
        throw new Error(SR.MAX_BLOB_SIZE_CONDITION_NOT_MEET);
      }

      operation = new BatchOperation.RestOperation(self, apiName, container, blob, data, rangeOptions, function (error, currentBlob) {
        if (!error) {
          autoIncrement();
        } else {
          self.logger.debug(util.format('Stop uploading data as error happens. Error: %s', util.inspect(error)));
          chunkStream.stop();
        }
        blobResult = currentBlob;
        allocator.releaseBuffer(data);
        data = null;
      });
    };

    if (isBlockBlobUpload) {
      uploadBlockBlobChunk();
    } else if (isAppendBlobUpload) {
      uploadAppendBlobChunk();
    } else if (isPageBlobUpload) {
      uploadPageBlobChunk();
    }

    if (operation) {
      full = batchOperations.addOperation(operation);
      operation = null;

      // Back-pressure: pause the source when the batch queue is saturated.
      if (full) {
        self.logger.debug('File stream paused');
        chunkStream.pause();
      }
    }
  });

  chunkStream.on('end', function () {
    self.logger.debug(util.format('File read stream ended for blob %s', blob));
    batchOperations.enableComplete();
  });

  batchOperations.on('drain', function () {
    self.logger.debug('file stream resume');
    chunkStream.resume();
  });

  batchOperations.on('end', function (error) {
    self.logger.debug('batch operations commited');

    speedSummary = null;
    if (error) {
      callback(error);
      return;
    }

    // Restore the caller's MD5, or store the one accumulated over the stream.
    if (originalContentMD5) {
      options.contentSettings.contentMD5 = originalContentMD5;
    } else if (options.storeBlobContentMD5) {
      var contentMD5 = chunkStream.getContentMd5('base64');
      azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], contentMD5);
    }

    if (isBlockBlobUpload) {
      //commit block list
      var blockList = { 'UncommittedBlocks': blockIds };
      self.commitBlocks(container, blob, blockList, options, function (error, blockList, response) {
        self.logger.debug(util.format('Blob %s committed', blob));

        if (error) {
          chunkStream.finish();

          callback(error);
        } else {
          // NOTE(review): 'commmittedBlocks' (triple "m") is kept as-is —
          // downstream consumers may read this exact key; confirm before renaming.
          blobResult['commmittedBlocks'] = blockIds;

          chunkStream.finish();
          callback(error, blobResult, response);
        }
      });
    } else {
      // upload page blob or append blob completely
      var blobProperties = options.contentSettings;
      self.setBlobProperties(container, blob, blobProperties, function (error, blob, response) {
        chunkStream.finish();
        blob = extend(false, blob, blobResult);
        callback(error, blob, response);
      });
    }
  });

  return speedSummary;
};

/**
* Checks whether or not a container exists on the service.
* @ignore
*
* @this {BlobService}
* @param {string}             container                                     The container name.
* @param {string}             primaryOnly                                   If true, the request will be executed against the primary storage location.
* @param {object}             [options]                                     The request options.
* @param {string}             [options.leaseId]                             The lease identifier.
* @param {LocationMode}       [options.locationMode]                        Specifies the location mode used to decide which location the request should be sent to.
*                                                                           Please see StorageUtilities.LocationMode for the possible values.
* @param {int}                [options.timeoutIntervalInMs]                 The server timeout interval, in milliseconds, to use for the request.
* @param {int}                [options.clientRequestTimeoutInMs]            The timeout of client requests, in milliseconds, to use for the request.
* @param {int}                [options.maximumExecutionTimeInMs]            The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                                           The maximum execution time interval begins at the time that the client begins building the request. The maximum
*                                                                           execution time is checked intermittently while performing requests, and before executing retries.
* @param {string}             [options.clientRequestId]                     A string that represents the client request ID with a 1KB character limit.
* @param {bool}               [options.useNagleAlgorithm]                   Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                                           The default value is false.
* @param {Function(error, result, response)} callback                       `error` will contain information if an error occurs; otherwise
*                                                                           `result` will contain the container information including `exists` boolean member.
*                                                                           `response` will contain information related to this operation.
+*/ +BlobService.prototype._doesContainerExist = function (container, primaryOnly, options, callback) { + var webResource = WebResource.head(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = new ContainerResult(container); + if (!responseObject.error) { + responseObject.containerResult.exists = true; + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.containerResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a blob exists on the service. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the blob information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._doesBlobExist = function (container, blob, primaryOnly, options, callback) { + var resourceName = createResourceName(container, blob); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = new BlobResult(container, blob); + if (!responseObject.error) { + responseObject.blobResult.exists = true; + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.blobResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* @ignore +*/ +BlobService.prototype._setBlobPropertiesHelper = function (settings) { + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(settings.container, settings.blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + settings.callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(settings.webResource, null, settings.options, processResponseCallback); +}; + +/** +* @ignore +*/ 
+BlobService.prototype._validateLengthAndMD5 = function (options, responseObject) { + var storedMD5 = responseObject.response.headers[Constants.HeaderConstants.CONTENT_MD5]; + var contentLength; + + if (!azureutil.objectIsNull(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH])) { + contentLength = parseInt(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH], 10); + } + + // If the user has not specified this option, the default value should be false. + if (azureutil.objectIsNull(options.disableContentMD5Validation)) { + options.disableContentMD5Validation = false; + } + + // None of the below cases should be retried. So set the error in every case so the retry policy filter handle knows that it shouldn't be retried. + if (options.disableContentMD5Validation === false && options.useTransactionalMD5 === true && azureutil.objectIsNull(storedMD5)) { + responseObject.error = new StorageError(SR.MD5_NOT_PRESENT_ERROR); + responseObject.retryable = false; + } + + // Validate length and if required, MD5. + // If getBlobToText called this method, then the responseObject.length and responseObject.contentMD5 are not set. Calculate them first using responseObject.response.body and then validate. 
+ if (azureutil.objectIsNull(responseObject.length)) { + if (typeof responseObject.response.body == 'string') { + responseObject.length = Buffer.byteLength(responseObject.response.body); + } else if (Buffer.isBuffer(responseObject.response.body)) { + responseObject.length = responseObject.response.body.length; + } + } + + if (!azureutil.objectIsNull(contentLength) && responseObject.length !== contentLength) { + responseObject.error = new Error(SR.CONTENT_LENGTH_MISMATCH); + responseObject.retryable = false; + } + + if (options.disableContentMD5Validation === false && azureutil.objectIsNull(responseObject.contentMD5)) { + responseObject.contentMD5 = azureutil.getContentMd5(responseObject.response.body); + } + + if (options.disableContentMD5Validation === false && !azureutil.objectIsNull(storedMD5) && storedMD5 !== responseObject.contentMD5) { + responseObject.error = new Error(util.format(SR.HASH_MISMATCH, storedMD5, responseObject.contentMD5)); + responseObject.retryable = false; + } +}; + +/** +* @ignore +*/ +BlobService.prototype._setRangeContentMD5Header = function (webResource, options) { + if (!azureutil.objectIsNull(options.rangeStart) && options.useTransactionalMD5) { + if (azureutil.objectIsNull(options.rangeEnd)) { + throw new ArgumentNullError('options.rangeEndHeader', util.format(SR.ARGUMENT_NULL_OR_EMPTY, options.rangeEndHeader)); + } + + var size = parseInt(options.rangeEnd, 10) - parseInt(options.rangeStart, 10) + 1; + if (size > BlobConstants.MAX_RANGE_GET_SIZE_WITH_MD5) { + throw new ArgumentError('options', SR.INVALID_RANGE_FOR_MD5); + } else { + webResource.withHeader(HeaderConstants.RANGE_GET_CONTENT_MD5, 'true'); + } + } +}; + +/** +* Downloads a blockblob, pageblob or appendblob into a range stream. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} blobType The type of blob to download: block blob, page blob or append blob. 
+* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
* @param {bool}               [options.useNagleAlgorithm]                 Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                                         The default value is false.
* @param {errorOrResult}      callback                                    `error` will contain information if an error occurs;
*                                                                         otherwise `result` will contain the blob information.
*                                                                         `response` will contain information related to this operation.
* @return {SpeedSummary}
*/
BlobService.prototype._getBlobToRangeStream = function (container, blob, blobType, writeStream, optionsOrCallback, callback) {
  var options;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; });

  validate.validateArgs('_getBlobToRangeStream', function (v) {
    v.string(container, 'container');
    v.string(blob, 'blob');
    v.containerNameIsValid(container);
    v.blobNameIsValid(container, blob);
    v.blobTypeIsValid(blobType);
    v.callback(callback);
  });

  // Pick the range enumerator that matches the blob type; only page blobs can
  // report "holes" (ranges with no stored data) that must be written as zeros.
  var rangeStream = null;
  var isPageBlobDownload = true;

  if (blobType == BlobConstants.BlobTypes.PAGE) {
    rangeStream = new PageRangeStream(this, container, blob, options);
  } else if (blobType == BlobConstants.BlobTypes.APPEND) {
    rangeStream = new RangeStream(this, container, blob, options);
    isPageBlobDownload = false;
  } else if (blobType == BlobConstants.BlobTypes.BLOCK) {
    rangeStream = new BlockRangeStream(this, container, blob, options);
    isPageBlobDownload = false;
  }

  if (!options.speedSummary) {
    options.speedSummary = new SpeedSummary(blob);
  }

  var speedSummary = options.speedSummary;
  var parallelOperationThreadCount = options.parallelOperationThreadCount || this.parallelOperationThreadCount;
  var batchOperations = new BatchOperation('getBlobInRanges', { callbackInOrder: true, logger: this.logger, enableReuseSocket: this.defaultEnableReuseSocket });
  batchOperations.setConcurrency(parallelOperationThreadCount);

  var self = this;
  // Client-side MD5 is accumulated over the whole download unless validation is disabled.
  var checkMD5sum = !options.disableContentMD5Validation;
  var md5Hash = null;
  if (checkMD5sum) {
    md5Hash = new Md5Wrapper().createMd5Hash();
  }

  var savedBlobResult = null;
  var savedBlobResponse = null;

  // Each emitted range becomes one batched GET; ranges with no data are zero-filled locally.
  rangeStream.on('range', function (range) {
    if (!speedSummary.totalSize) {
      speedSummary.totalSize = rangeStream.rangeSize;
    }

    var requestOptions = {
      rangeStart: range.start,
      rangeEnd: range.end,
      responseEncoding: null //Use Buffer to store the response data
    };

    var rangeSize = range.size;
    requestOptions.timeoutIntervalInMs = options.timeoutIntervalInMs;
    requestOptions.clientRequestTimeoutInMs = options.clientRequestTimeoutInMs;
    requestOptions.useTransactionalMD5 = options.useTransactionalMD5;
    requestOptions.snapshotId = options.snapshotId;

    if (range.dataSize === 0) {
      if (isPageBlobDownload) {
        var autoIncrement = speedSummary.getAutoIncrementFunction(rangeSize);
        //No operation to do and only wait for write zero to file in callback
        var writeZeroOperation = new BatchOperation.CommonOperation(BatchOperation.noOperation, function (error) {
          if (error) return;
          var bufferAvailable = azureutil.writeZerosToStream(writeStream, rangeSize, md5Hash, autoIncrement);
          //There is no need to pause the rangestream since we can perform http request and write disk at the same time
          self.logger.debug(util.format('Write %s bytes Zero from %s to %s', rangeSize, range.start, range.end));
          if (!bufferAvailable) {
            self.logger.debug('Write stream is full and pause batch operation');
            batchOperations.pause();
          }
        });
        batchOperations.addOperation(writeZeroOperation);
      } else {
        self.logger.debug(util.format('Can not read %s bytes to %s bytes of blob %s', range.start, range.end, blob));
      }
      return;
    }

    if (range.start > range.end) {
      return;
    }

    var operation = new BatchOperation.RestOperation(self, 'getBlobToText', container, blob, requestOptions, function (error, content, blobResult, response) {
      if (!error) {
        if (rangeSize !== content.length) {
          self.logger.warn(util.format('Request %s bytes, but server returns %s bytes', rangeSize, content.length));
        }
        //Save one of the succeeded callback parameters and use them at the final callback
        if (!savedBlobResult) {
          savedBlobResult = blobResult;
        }
        if (!savedBlobResponse) {
          savedBlobResponse = response;
        }
        var autoIncrement = speedSummary.getAutoIncrementFunction(content.length);
        var bufferAvailable = writeStream.write(content, autoIncrement);
        if (!bufferAvailable) {
          self.logger.debug('Write stream is full and pause batch operation');
          batchOperations.pause();
        }
        if (md5Hash) {
          md5Hash.update(content);
        }
        content = null;
      } else {
        self.logger.debug(util.format('Stop downloading data as error happens. Error: %s', util.inspect(error)));
        rangeStream.stop();
      }
    });

    var full = batchOperations.addOperation(operation);
    if (full) {
      self.logger.debug('Pause range stream');
      rangeStream.pause();
    }
  });

  rangeStream.on('end', function () {
    self.logger.debug('Range stream has ended.');
    batchOperations.enableComplete();
  });

  // Back-pressure wiring: batch drain resumes range enumeration; write-stream drain resumes batching.
  batchOperations.on('drain', function () {
    self.logger.debug('Resume range stream');
    rangeStream.resume();
  });

  writeStream.on('drain', function () {
    self.logger.debug('Resume batch operations');
    batchOperations.resume();
  });

  batchOperations.on('end', function (error) {
    self.logger.debug('Download completed!');
    if (error) {
      callback(error);
      return;
    } else {
      writeStream.end(function () {
        self.logger.debug('Write stream has ended');
        if (!savedBlobResult) {
          savedBlobResult = {};
        }

        azureutil.setObjectInnerPropertyValue(savedBlobResult, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null));
        savedBlobResult.clientSideContentMD5 = null;
        if (md5Hash) {
          savedBlobResult.clientSideContentMD5 = md5Hash.digest('base64');
        }
        callback(error, savedBlobResult, savedBlobResponse);
      });
    }
  });

  var listOptions = {
    timeoutIntervalInMs: options.timeoutIntervalInMs,
    clientRequestTimeoutInMs: options.clientRequestTimeoutInMs,
    snapshotId: options.snapshotId,
    leaseId: options.leaseId,
    blockListFilter: BlobUtilities.BlockListFilter.COMMITTED
  };

  // Kick off range enumeration; per-range GETs are scheduled from the 'range' events above.
  rangeStream.list(listOptions, function (error) {
    callback(error);
  });

  return speedSummary;
};

/**
* Downloads a blockblob or pageblob into a stream.
* @ignore
* @this {BlobService}
* @param {string}             container                                   The container name.
* @param {string}             blob                                        The blob name.
* @param {Writable}           writeStream                                 The Node.js Writable stream.
* @param {object}             [options]                                   The request options.
* @param {string}             [options.snapshotId]                        The snapshot identifier.
* @param {string}             [options.leaseId]                           The lease identifier.
* @param {string}             [options.rangeStart]                        Return only the bytes of the blob in the specified range.
* @param {string}             [options.rangeEnd]                          Return only the bytes of the blob in the specified range.
* @param {AccessConditions}   [options.accessConditions]                  The access conditions.
* @param {boolean}            [options.useTransactionalMD5]               When set to true, Calculate and send/validate content MD5 for transactions.
* @param {boolean}            [options.disableContentMD5Validation]       When set to true, MD5 validation will be disabled when downloading blobs.
* @param {LocationMode}       [options.locationMode]                      Specifies the location mode used to decide which location the request should be sent to.
*                                                                         Please see StorageUtilities.LocationMode for the possible values.
* @param {int}                [options.timeoutIntervalInMs]               The timeout interval, in milliseconds, to use for the request.
* @param {int}                [options.clientRequestTimeoutInMs]          The timeout of client requests, in milliseconds, to use for the request.
* @param {int}                [options.maximumExecutionTimeInMs]          The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                                         The maximum execution time interval begins at the time that the client begins building the request.
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype._getBlobToStream = function (container, blob, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName).withRawResponse(); + + var options = extend(true, {}, userOptions); + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + BlobResult.setHeadersFromBlob(webResource, options); + + this._setRangeContentMD5Header(webResource, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + + self._validateLengthAndMD5(options, responseObject); + + if (options.speedSummary) { + options.speedSummary.increment(responseObject.length); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + 
this.performRequestInputStream(webResource, null, writeStream, options, processResponseCallback); +}; + +/** +* Lists a segment containing a collection of blob items whose names begin with the specified prefix in the container. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} prefix The prefix of the blob name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {ListBlobTypes} listBlobType Specifies the item type of the results. +* @param {object} [options] The request options. +* @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) +* @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted'). +* Please find these values in BlobUtilities.BlobListingDetails. Multiple values can be added separated with a comma (,). +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
* @param {string}             [options.clientRequestId]                   A string that represents the client request ID with a 1KB character limit.
* @param {bool}               [options.useNagleAlgorithm]                 Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                                         The default value is false.
* @param {errorOrResult}      callback                                    `error` will contain information
*                                                                         if an error occurs; otherwise `result` will contain
*                                                                         the entries of blobs and the continuation token for the next listing operation.
*                                                                         `response` will contain information related to this operation.
*/
// NOTE(review): "Dircotries" is a long-standing typo for "Directories", but the
// name is referenced by the public listing entry points elsewhere in this file,
// so it is intentionally left unchanged here.
BlobService.prototype._listBlobsOrDircotriesSegmentedWithPrefix = function (container, prefix, currentToken, listBlobType, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('listBlobsSegmented', function (v) {
    v.string(container, 'container');
    v.containerNameIsValid(container);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);
  var webResource = WebResource.get(container)
    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
    .withQueryOption(QueryStringConstants.COMP, 'list')
    .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults)
    .withQueryOptions(options,
      QueryStringConstants.DELIMITER,
      QueryStringConstants.INCLUDE);

  // Resume from the previous page when a continuation token was supplied.
  if (!azureutil.objectIsNull(currentToken)) {
    webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker);
  }

  webResource.withQueryOption(QueryStringConstants.PREFIX, prefix);

  options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken);

  var processResponseCallback = function (responseObject, next) {
    responseObject.listBlobsResult = null;
    if (!responseObject.error) {
      responseObject.listBlobsResult = {
        entries: null,
        continuationToken: null
      };

      responseObject.listBlobsResult.entries = [];
      var results = [];

      // The XML parser returns a single object (not an array) when exactly one
      // <BlobPrefix>/<Blob> element is present, so normalize to an array.
      if (listBlobType == BlobConstants.ListBlobTypes.Directory && responseObject.response.body.EnumerationResults.Blobs.BlobPrefix) {
        results = responseObject.response.body.EnumerationResults.Blobs.BlobPrefix;
        if (!_.isArray(results)) {
          results = [results];
        }
      } else if (listBlobType == BlobConstants.ListBlobTypes.Blob && responseObject.response.body.EnumerationResults.Blobs.Blob) {
        results = responseObject.response.body.EnumerationResults.Blobs.Blob;
        if (!_.isArray(results)) {
          results = [results];
        }
      }

      results.forEach(function (currentBlob) {
        var blobResult = BlobResult.parse(currentBlob);
        responseObject.listBlobsResult.entries.push(blobResult);
      });

      // A NextMarker in the response means more pages exist; hand it back as a token.
      if (responseObject.response.body.EnumerationResults.NextMarker) {
        responseObject.listBlobsResult.continuationToken = {
          nextMarker: null,
          targetLocation: null
        };

        responseObject.listBlobsResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker;
        responseObject.listBlobsResult.continuationToken.targetLocation = responseObject.targetLocation;
      }
    }

    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.listBlobsResult, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};

/**
* Create a new blob.
* @ignore
*
* @this {BlobService}
* @param {string}             container                                     The container name.
* @param {string}             blob                                          The blob name.
* @param {BlobType}           blobType                                      The blob type.
* @param {int}                size                                          The blob size.
* @param {object}             [options]                                     The request options.
* @param {string}             [options.blobTier]                            For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier.
* @param {errorOrResult}      callback                                      The callback which operates on the specific blob.
*/
BlobService.prototype._createBlob = function (container, blob, blobType, size, options, creationCallback) {
  // NOTE(review): the three branches invoke creationCallback with different
  // arities (append: 3 args, page: 1 arg, block: none). Callers appear to
  // tolerate this, so it is documented here rather than changed.
  if (blobType == BlobConstants.BlobTypes.APPEND) {
    this.createOrReplaceAppendBlob(container, blob, options, function (createError, createResponse) {
      creationCallback(createError, null, createResponse);
    });
  } else if (blobType == BlobConstants.BlobTypes.PAGE) {
    this.createPageBlob(container, blob, size, options, function (createError) {
      creationCallback(createError);
    });
  } else if (blobType == BlobConstants.BlobTypes.BLOCK) {
    // Block blobs are materialized by the upload/commit itself; nothing to create here.
    creationCallback();
  }
};

/**
* The callback for {BlobService~getBlobToText}.
* @typedef {function} BlobService~blobToText
* @param {object} error      If an error occurs, the error information.
* @param {string} text       The text returned from the blob.
* @param {object} blockBlob  Information about the blob.
* @param {object} response   Information related to this operation.
*/

BlobService.SpeedSummary = SpeedSummary;

module.exports = BlobService;
diff --git a/src/node_modules/azure-storage/lib/services/blob/blobservice.node.js b/src/node_modules/azure-storage/lib/services/blob/blobservice.node.js
new file mode 100644
index 0000000..32a43fb
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/services/blob/blobservice.node.js
@@ -0,0 +1,475 @@
+//
+// Copyright (c) Microsoft and contributors.  All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Module dependencies.
+var azureCommon = require('./../../common/common.node'); +var BlobService = require('./blobservice.core'); +var extend = require('extend'); +var fs = require('fs'); +var mime = require('browserify-mime'); + +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var FileReadStream = azureCommon.FileReadStream; +var SpeedSummary = azureCommon.SpeedSummary; +var validate = azureCommon.validate; +var BlobConstants = Constants.BlobConstants; + +/** +* Downloads a blob into a file. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be downloaded. +* @param {object} [options] The request options. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small blobs. +* Parallel download and speed summary won't work with this option on. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link BlobResult}` will contain the blob information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +* +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.getBlobToLocalFile('taskcontainer', 'task1', 'task1-download.txt', function(error, serverBlob) { +* if(!error) { +* // Blob available in serverBlob.blob variable +* } +* }); +*/ +BlobService.prototype.getBlobToLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('getBlobToLocalFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.string(localFileName, 'localFileName'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + return this._getBlobToLocalFile(container, blob, localFileName, options, callback); +}; + +/** +* Uploads a page blob from file. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createPageBlobFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + return this._createBlobFromLocalFile(container, blob, BlobConstants.BlobTypes.PAGE, localFileName, optionsOrCallback, callback); +}; + +/** +* Creates a new block blob. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createBlockBlobFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + return this._createBlobFromLocalFile(container, blob, BlobConstants.BlobTypes.BLOCK, localFileName, optionsOrCallback, callback); +}; + +/** +* Creates a new append blob from a local file. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If you want to append data to an already existing blob, please look at appendFromLocalFile. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. 
+* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createAppendBlobFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + return this._createBlobFromLocalFile(container, blob, BlobConstants.BlobTypes.APPEND, localFileName, optionsOrCallback, callback); +}; + +/** +* Appends to an append blob from a local file. Assumes the blob already exists on the service. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. 
The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +*/ +BlobService.prototype.appendFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendFromLocalFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.string(localFileName, 'localFileName'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var self = this; + fs.stat(localFileName, function (error, stat) { + if (error) { + callback(error); + } else { + var stream = new FileReadStream(localFileName, { calcContentMd5: options.storeBlobContentMD5 }); + var streamCallback = function (appendError, blob, response) { + if (azureutil.objectIsFunction(stream.destroy)) { + stream.destroy(); + } + callback(appendError, blob, response); + }; + + try { + self._uploadBlobFromStream(false, container, blob, BlobConstants.BlobTypes.APPEND, stream, stat.size, options, streamCallback); + } catch (err) { + callback(err); + } + } + }); + + return options.speedSummary; +}; + +// Private methods + +/** +* Creates a new blob (Block/Page/Append). If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. 
(For append blob only) +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the blob content. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* +* @return {SpeedSummary} +* +*/ +BlobService.prototype._createBlobFromLocalFile = function (container, blob, blobType, localFileName, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromLocalFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.string(localFileName, 'localFileName'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var self = this; + var size = 0; + + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + // Automatically detect the mime type + if(azureutil.tryGetValueChain(options, ['contentSettings','contentType'], undefined) === 
undefined) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings','contentType'], mime.lookup(localFileName)); + } + + var stream = new FileReadStream(localFileName, { calcContentMd5: options.storeBlobContentMD5 }); + var streamCallback = function (createError, createBlob, createResponse) { + if (azureutil.objectIsFunction(stream.destroy)) { + stream.destroy(); + } + callback(createError, createBlob, createResponse); + }; + self._uploadBlobFromStream(true, container, blob, blobType, stream, size, options, streamCallback); + } + }; + + // Check the file size to determine the upload method: single request or chunks + fs.stat(localFileName, function (error, stat) { + if (error) { + callback(error); + } else { + size = stat.size; + try { + self._createBlob(container, blob, blobType, size, options, creationCallback); + } catch (err) { + callback(err); + } + } + }); + + return options.speedSummary; +}; + +/** +* Downloads a blob into a file. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be downloaded. +* @param {object} [options] The request options. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small blobs. +* Parallel download and speed summary won't work with this option on. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the blob information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +* +*/ +BlobService.prototype._getBlobToLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var writeStream = fs.createWriteStream(localFileName, { 'highWaterMark': BlobConstants.MAX_QUEUED_WRITE_DISK_BUFFER_SIZE }); + writeStream.on('error', function (error) { + callback(error); + }); + + this.getBlobToStream(container, blob, writeStream, options, function (error, responseBlob, response) { + if (error) { + var onErrorCallback = function() { + // If the download failed from the beginning, remove the file. + if (fs.existsSync(localFileName) && writeStream.bytesWritten === 0) { + fs.unlinkSync(localFileName); + } + callback(error, responseBlob, response); + }; + if (!writeStream.closed) { + writeStream.end(onErrorCallback); + } else { + onErrorCallback(); + } + } else { + callback(error, responseBlob, response); + } + }); + + return options.speedSummary; +}; + +module.exports = BlobService; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/blob/blobutilities.js b/src/node_modules/azure-storage/lib/services/blob/blobutilities.js new file mode 100644 index 0000000..9b3f9e6 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/blobutilities.js @@ -0,0 +1,136 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'BlobUtilities'. +exports = module.exports; + +/** +* Defines constants, enums, and utility functions for use with the Blob service. +* @namespace BlobUtilities +*/ +var BlobUtilities = { + /** + * Permission types + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + READ: 'r', + ADD: 'a', + CREATE: 'c', + WRITE: 'w', + DELETE: 'd', + LIST: 'l' + }, + + /** + * Blob listing details. + * + * @const + * @enum {string} + */ + BlobListingDetails: { + SNAPSHOTS: 'snapshots', + METADATA: 'metadata', + UNCOMMITTED_BLOBS: 'uncommittedblobs', + COPY: 'copy', + DELETED: 'deleted' + }, + + /** + * Deletion options for blob snapshots + * + * @const + * @enum {string} + */ + SnapshotDeleteOptions: { + SNAPSHOTS_ONLY: 'only', + BLOB_AND_SNAPSHOTS: 'include' + }, + + /** + * Type of block list to retrieve + * + * @const + * @enum {string} + */ + BlockListFilter: { + ALL: 'all', + COMMITTED: 'committed', + UNCOMMITTED: 'uncommitted' + }, + + /** + * Blobs and container public access types. + * + * @const + * @enum {string} + */ + BlobContainerPublicAccessType: { + OFF: null, + CONTAINER: 'container', + BLOB: 'blob' + }, + + /** + * Describes actions that can be performed on a page blob sequence number. + * @const + * @enum {string} + */ + SequenceNumberAction: { + MAX: 'max', + UPDATE: 'update', + INCREMENT: 'increment' + }, + + /** + * Candidate values for blob tiers. + * + * @property {object} PremiumPageBlobTier Candidate values for premium pageblob tiers. 
+ * @property {string} PremiumPageBlobTier.P4 + * @property {string} PremiumPageBlobTier.P6 + * @property {string} PremiumPageBlobTier.P10 + * @property {string} PremiumPageBlobTier.P20 + * @property {string} PremiumPageBlobTier.P30 + * @property {string} PremiumPageBlobTier.P40 + * @property {string} PremiumPageBlobTier.P50 + * @property {string} PremiumPageBlobTier.P60 + * @property {object} StandardBlobTier Candidate values for standard blobs tiers. + * @property {string} StandardBlobTier.HOT + * @property {string} StandardBlobTier.COOL + * @property {string} StandardBlobTier.ARCHIVE + */ + BlobTier: { + PremiumPageBlobTier: { + P4: 'P4', + P6: 'P6', + P10: 'P10', + P20: 'P20', + P30: 'P30', + P40: 'P40', + P50: 'P50', + P60: 'P60' + }, + StandardBlobTier: { + HOT: 'Hot', + COOL: 'Cool', + ARCHIVE: 'Archive' + } + } +}; + +module.exports = BlobUtilities; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/blob/internal/blockrangestream.js b/src/node_modules/azure-storage/lib/services/blob/internal/blockrangestream.js new file mode 100644 index 0000000..f94a2fd --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/internal/blockrangestream.js @@ -0,0 +1,203 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var Constants = require('./../../../common/util/constants'); +var EventEmitter = require('events').EventEmitter; +var BlobUtilities = require('./../blobutilities'); + +/** +* BlockBlob block range stream +*/ +function BlockRangeStream(blobServiceClient, container, blob, options) { + this.blobServiceClient = blobServiceClient; + this.container = container; + this.blob = blob; + this._emitter = new EventEmitter(); + this._paused = false; + this._emittedAll = false; + this._emittedRangeType = null; + this._emittedRangeIndex = null; + this._offset = 0; + this._rangelist = []; + this._isEmitting = false; + if (options.rangeStart) { + this._startOffset = options.rangeStart; + } else { + this._startOffset = 0; + } + if (options.rangeEnd) { + this._endOffset = options.rangeEnd; + } else { + this._endOffset = Number.MAX_VALUE; + } +} + +/** +* Add event listener +*/ +BlockRangeStream.prototype.on = function (event, listener) { + this._emitter.on(event, listener); +}; + +/** +* Get block list +*/ +BlockRangeStream.prototype.list = function (options, callback) { + if (!options) { + options = {}; + } + + if (!options.blockListFilter) { + options.blockListFilter = BlobUtilities.BlockListFilter.ALL; + } + + var self = this; + this.blobServiceClient.listBlocks(this.container, this.blob, options.blockListFilter, options, function (error, blocklist, response) { + if (error) { + callback(error); + } else { + var totalSize = parseInt(response.headers[Constants.HeaderConstants.BLOB_CONTENT_LENGTH], 10); + if (!blocklist.CommittedBlocks) { + //Convert single block blob to block blob range + var name = 'NODESDK_BLOCKBLOB_RANGESTREAM'; + blocklist.CommittedBlocks = [{ Name : name, Size : totalSize }]; + } + + self._rangelist = blocklist; + self._emitBlockList(); + self = blocklist = null; + } + }); +}; + +/** +* Emit block ranges +*/ +BlockRangeStream.prototype._emitBlockList = function () { + if (this._paused || this._emittedAll || this._isEmitting) return; + + var self = this; + 
this._getTypeList(function () { + self._rangelist = null; + self._emittedAll = true; + self._emitter.emit('end'); + }); +}; + +/** +* Get the block type list +*/ +BlockRangeStream.prototype._getTypeList = function (callback) { + this._isEmitting = true; + try { + var typeStart = false; + if (this._rangelist) { + for (var blockType in this._rangelist) { + if (this._rangelist.hasOwnProperty(blockType)) { + if (this._emittedRangeType === null || typeStart || this._emittedRangeType == blockType) { + this._emittedRangeType = blockType; + typeStart = true; + } else if (this._emittedRangeType !== blockType) { + continue; + } + + if (this._paused) { + return; + } + + this._emitBlockRange (blockType, callback); + } + } + } + } finally { + this._isEmitting = false; + } +}; + +/** +* Get the block list +*/ +BlockRangeStream.prototype._emitBlockRange = function (blockType, callback) { + var blockList = this._rangelist[blockType]; + var indexStart = false; + for (var blockIndex = 0; blockIndex < blockList.length; blockIndex++) { + if (this._emittedRangeIndex === null || indexStart || this._emittedRangeIndex === blockIndex) { + this._emittedRangeIndex = blockIndex; + indexStart = true; + } else if (this._emittedRangeIndex !== blockIndex) { + continue; + } + + if (this._paused) { + return; + } + + var range = blockList[blockIndex]; + // follow the same naming convention of page ranges and json + range.name = range.Name; + range.type = blockType; + range.start = this._offset; + this._offset += parseInt(range.Size, 10); + range.end = this._offset - 1; + delete range.Name; + delete range.Size; + + if (range.start > this._endOffset) { + break; + } else if (range.end < this._startOffset) { + continue; + } else { + range.start = Math.max(range.start, this._startOffset); + range.end = Math.min(range.end, this._endOffset); + range.size = range.end - range.start + 1; + range.dataSize = range.size; + this._emitter.emit('range', range); + } + } + + // remove the used range and avoid memory 
leak + this._rangelist[blockType] = null; + + callback(); +}; + +/** +* Pause the stream +*/ +BlockRangeStream.prototype.pause = function () { + this._paused = true; +}; + +/** +* Resume the stream +*/ +BlockRangeStream.prototype.resume = function () { + this._paused = false; + if (!this._isEmitting) { + this._emitBlockList(); + } +}; + +/** +* Stop the stream +*/ +BlockRangeStream.prototype.stop = function () { + this.pause(); + this._emittedAll = true; + this._emitter.emit('end'); +}; + +module.exports = BlockRangeStream; diff --git a/src/node_modules/azure-storage/lib/services/blob/internal/pagerangestream.js b/src/node_modules/azure-storage/lib/services/blob/internal/pagerangestream.js new file mode 100644 index 0000000..1943712 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/internal/pagerangestream.js @@ -0,0 +1,43 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var util = require('util'); +var RangeStream = require('./../../../common/streams/rangestream'); +var Constants = require('./../../../common/util/constants'); + +/** +* PageBlob page range stream +*/ +function PageRangeStream(blobServiceClient, container, blob, options) { + PageRangeStream['super_'].call(this, blobServiceClient, container, blob, options); + + if (options.minRangeSize) { + this._minRangeSize = options.minRangeSize; + } else { + this._minRangeSize = Constants.BlobConstants.MIN_WRITE_PAGE_SIZE_IN_BYTES; + } + if (options.maxRangeSize) { + this._maxRangeSize = options.maxRangeSize; + } else { + this._maxRangeSize = Constants.BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES; + } + this._lengthHeader = Constants.HeaderConstants.BLOB_CONTENT_LENGTH; + this._listFunc = blobServiceClient.listPageRanges; +} + +util.inherits(PageRangeStream, RangeStream); + +module.exports = PageRangeStream; diff --git a/src/node_modules/azure-storage/lib/services/blob/models/blobresult.js b/src/node_modules/azure-storage/lib/services/blob/models/blobresult.js new file mode 100644 index 0000000..eb12b51 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/models/blobresult.js @@ -0,0 +1,316 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var _ = require('underscore'); + +var azureCommon = require('./../../../common/common.core'); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new BlobResult object. +* @class +* The BlobResult class is used to store the blob information. +* + * @property {string} container The container name. + * @property {string} name The blob name. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the blob was last modified. + * @property {string} contentLength The size of the blob in bytes. + * @property {string} blobType The blob type. + * @property {boolean} isIncrementalCopy If the blob is incremental copy blob. + * @property {string} requestId The request id. + * @property {string} sequenceNumber The current sequence number for a page blob. + * @property {string} contentRange The content range. + * @property {string} committedBlockCount The committed block count. + * @property {string} serverEncrypted If the blob data and application metadata are completely encrypted using the specified algorithm. true/false. + * @property {object} contentSettings The content settings. + * @property {string} contentSettings.contentType The content type. + * @property {string} contentSettings.contentEncoding The content encoding. + * @property {string} contentSettings.contentLanguage The content language. + * @property {string} contentSettings.cacheControl The cache control. + * @property {string} contentSettings.contentDisposition The content disposition. + * @property {string} contentSettings.contentMD5 The content MD5 hash. + * @property {object} lease The lease information. + * @property {string} lease.id The lease id. + * @property {string} lease.status The lease status. + * @property {string} lease.state The lease state. + * @property {string} lease.duration The lease duration. 
+ * @property {object} copy The copy information. + * @property {string} copy.id The copy id. + * @property {string} copy.status The copy status. + * @property {string} copy.completionTime The copy completion time. + * @property {string} copy.statusDescription The copy status description. + * @property {string} copy.destinationSnapshot The snapshot time of the last successful incremental copy snapshot for this blob. + * @property {string} copy.progress The copy progress. + * @property {string} copy.source The copy source. + * +* @constructor +* @param {string} [container] The container name. +* @param {string} [name] The blob name. +*/ +function BlobResult(container, name) { + if (container) { + this.container = container; + } + + if (name) { + this.name = name; + } +} + +BlobResult.parse = function (blobXml) { + var blobResult = new BlobResult(); + + for (var propertyName in blobXml) { + if (blobXml.hasOwnProperty(propertyName)) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getBlobProperties + azureutil.setPropertyValueFromXML(blobResult, blobXml[propertyName], true); + } else if (propertyName === 'Metadata') { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(propertyName); + blobResult[resultPropertyName] = {}; + azureutil.setPropertyValueFromXML(blobResult[resultPropertyName], blobXml[propertyName], false); + } else { + blobResult[propertyName.toLowerCase()] = blobXml[propertyName]; + } + } + } + + if (blobResult.isIncrementalCopy !== undefined) { + blobResult.isIncrementalCopy = (blobResult.isIncrementalCopy === 'true'); + } + + // convert accessTierInferred to boolean type + if (blobResult.accessTierInferred !== undefined) { + blobResult.accessTierInferred = (blobResult.accessTierInferred === 'true'); + } + + if (blobResult.deleted !== undefined) { + blobResult.deleted = (blobResult.deleted == 'true'); + } + + if (blobResult.remainingRetentionDays !== 
undefined) { + blobResult.remainingRetentionDays = parseInt(blobResult.remainingRetentionDays); + } + + return blobResult; +}; + +var headersForProperties = { + 'lastModified': 'LAST_MODIFIED', + 'creationTime': 'CREATION_TIME', + 'etag': 'ETAG', + 'sequenceNumber': 'SEQUENCE_NUMBER', + 'blobType': 'BLOB_TYPE', + 'contentLength': 'CONTENT_LENGTH', + 'blobContentLength': 'BLOB_CONTENT_LENGTH', + 'contentRange': 'CONTENT_RANGE', + 'committedBlockCount': 'BLOB_COMMITTED_BLOCK_COUNT', + 'serverEncrypted': 'SERVER_ENCRYPTED', + 'requestId': 'REQUEST_ID', + + 'range': 'RANGE', + 'blobRange': 'STORAGE_RANGE', + 'getContentMd5': 'RANGE_GET_CONTENT_MD5', + 'acceptRanges': 'ACCEPT_RANGES', + 'appendOffset': 'BLOB_APPEND_OFFSET', + + 'accessTier': 'ACCESS_TIER', + 'accessTierChangeTime': 'ACCESS_TIER_CHANGE_TIME', + 'accessTierInferred': 'ACCESS_TIER_INFERRED', + 'archiveStatus': 'ARCHIVE_STATUS', + + 'isIncrementalCopy': 'INCREMENTAL_COPY', + + // ContentSettings + 'contentSettings.contentType': 'CONTENT_TYPE', + 'contentSettings.contentEncoding': 'CONTENT_ENCODING', + 'contentSettings.contentLanguage': 'CONTENT_LANGUAGE', + 'contentSettings.cacheControl': 'CACHE_CONTROL', + 'contentSettings.contentDisposition': 'CONTENT_DISPOSITION', + 'contentSettings.contentMD5': 'CONTENT_MD5', + + // Lease + 'lease.id': 'LEASE_ID', + 'lease.status': 'LEASE_STATUS', + 'lease.duration': 'LEASE_DURATION', + 'lease.state': 'LEASE_STATE', + + // Copy + 'copy.id': 'COPY_ID', + 'copy.status': 'COPY_STATUS', + 'copy.source': 'COPY_SOURCE', + 'copy.progress': 'COPY_PROGRESS', + 'copy.completionTime': 'COPY_COMPLETION_TIME', + 'copy.statusDescription': 'COPY_STATUS_DESCRIPTION', + 'copy.destinationSnapshot': 'COPY_DESTINATION_SNAPSHOT' +}; + +BlobResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setBlobPropertyFromHeaders = function (blobProperty, headerProperty) { + if (!azureutil.tryGetValueChain(self, blobProperty.split('.'), null) && 
headers[headerProperty.toLowerCase()]) { + azureutil.setObjectInnerPropertyValue(self, blobProperty.split('.'), headers[headerProperty.toLowerCase()]); + + if (blobProperty === 'copy.progress') { + var info = azureutil.parseCopyProgress(self.copy.progress); + self.copy.bytesCopied = parseInt(info.bytesCopied); + self.copy.totalBytes = parseInt(info.totalBytes); + } + } + }; + + // For range get, 'x-ms-blob-content-md5' indicate the overall MD5 of the blob. Try to set the contentMD5 using this header if it presents + setBlobPropertyFromHeaders('contentSettings.contentMD5', HeaderConstants.BLOB_CONTENT_MD5); + + _.chain(headersForProperties).pairs().each(function (pair) { + var property = pair[0]; + var header = HeaderConstants[pair[1]]; + setBlobPropertyFromHeaders(property, header); + }); + + // convert isIncrementalCopy to boolean type + if (self.isIncrementalCopy !== undefined) { + self.isIncrementalCopy = (self.isIncrementalCopy === 'true'); + } + + // convert accessTierInferred to boolean type + if (self.accessTierInferred !== undefined) { + self.accessTierInferred = (self.accessTierInferred == 'true'); + } +}; + +/** +* This method sets the HTTP headers and is used by all methods except setBlobProperties and commitBlocks. Those 2 methods will set the x-ms-* headers using setPropertiesFromBlob. 
+* @ignore +*/ +BlobResult.setHeadersFromBlob = function (webResource, blob) { + var setHeaderPropertyFromBlob = function (headerProperty, blobProperty) { + var blobPropertyValue = azureutil.tryGetValueChain(blob, blobProperty.split('.'), null); + if (blobPropertyValue) { + webResource.withHeader(headerProperty, blobPropertyValue); + } + }; + + if (blob) { + // Content-Type + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_TYPE, 'contentSettings.contentType'); + + // Content-Encoding + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_ENCODING, 'contentSettings.contentEncoding'); + + // Content-Language + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_LANGUAGE, 'contentSettings.contentLanguage'); + + // Content-Disposition + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_DISPOSITION, 'contentSettings.contentDisposition'); + + // Cache-Control + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CACHE_CONTROL, 'contentSettings.cacheControl'); + + // Blob's Content-MD5 + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_MD5, 'contentSettings.contentMD5'); + + // Content-Length + setHeaderPropertyFromBlob(HeaderConstants.CONTENT_LENGTH, 'contentLength'); + + // transactional Content-MD5 + setHeaderPropertyFromBlob(HeaderConstants.CONTENT_MD5, 'transactionalContentMD5'); + + // Range + if (!azureutil.objectIsNull(blob.rangeStart)) { + var range = 'bytes=' + blob.rangeStart + '-'; + + if (!azureutil.objectIsNull(blob.rangeEnd)) { + range += blob.rangeEnd; + } + + webResource.withHeader(HeaderConstants.RANGE, range); + } + + // Source Range + if (!azureutil.objectIsNull(blob.sourceRangeStart)) { + var sourceRange = 'bytes=' + blob.sourceRangeStart + '-'; + + if (!azureutil.objectIsNull(blob.sourceRangeEnd)) { + sourceRange += blob.sourceRangeEnd; + } + + webResource.withHeader(HeaderConstants.SOURCE_RANGE, sourceRange); + } + + // Blob Type + setHeaderPropertyFromBlob(HeaderConstants.BLOB_TYPE, 'blobType'); + + // Lease id + 
setHeaderPropertyFromBlob(HeaderConstants.LEASE_ID, 'leaseId'); + + // Sequence number + setHeaderPropertyFromBlob(HeaderConstants.SEQUENCE_NUMBER, 'sequenceNumber'); + setHeaderPropertyFromBlob(HeaderConstants.SEQUENCE_NUMBER_ACTION, 'sequenceNumberAction'); + + if (blob.metadata) { + webResource.addOptionalMetadataHeaders(blob.metadata); + } + } +}; + +/** +* This method sets the x-ms-* headers and is used by setBlobProperties and commitBlocks. All other methods will set the regular HTTP headers using setHeadersFromBlob. +* @ignore +*/ +BlobResult.setPropertiesFromBlob = function (webResource, blob) { + var setHeaderPropertyFromBlob = function (headerProperty, blobProperty) { + var propertyValue = azureutil.tryGetValueChain(blob, blobProperty.split('.'), null); + if (propertyValue) { + webResource.withHeader(headerProperty, propertyValue); + } + }; + + if (blob) { + // Content-Type + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_TYPE, 'contentSettings.contentType'); + + // Content-Encoding + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_ENCODING, 'contentSettings.contentEncoding'); + + // Content-Language + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_LANGUAGE, 'contentSettings.contentLanguage'); + + // Content-Disposition + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_DISPOSITION, 'contentSettings.contentDisposition'); + + // Cache-Control + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CACHE_CONTROL, 'contentSettings.cacheControl'); + + // Content-MD5 + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_MD5, 'contentSettings.contentMD5'); + + // Lease id + setHeaderPropertyFromBlob(HeaderConstants.LEASE_ID, 'leaseId'); + + if (blob.metadata) { + webResource.addOptionalMetadataHeaders(blob.metadata); + } + } +}; + +module.exports = BlobResult; diff --git a/src/node_modules/azure-storage/lib/services/blob/models/blocklistresult.js b/src/node_modules/azure-storage/lib/services/blob/models/blocklistresult.js new file mode 
100644 index 0000000..044be41 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/models/blocklistresult.js @@ -0,0 +1,84 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = require('underscore'); + +var azureCommon = require('./../../../common/common.core'); +var xmlbuilder = azureCommon.xmlbuilder; +var Constants = azureCommon.Constants; + +/** +* Builds an XML representation for a block list. +* +* @param {array} The block list. +* @return {string} The XML block list. 
+*/ +exports.serialize = function (blockListJs) { + var blockListDoc = xmlbuilder.create(Constants.BlobConstants.BLOCK_LIST_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + if (_.isArray(blockListJs.LatestBlocks)) { + blockListJs.LatestBlocks.forEach(function (block) { + blockListDoc = blockListDoc.ele(Constants.BlobConstants.LATEST_ELEMENT) + .txt(Buffer.from(block).toString('base64')) + .up(); + }); + } + + if (_.isArray(blockListJs.CommittedBlocks)) { + blockListJs.CommittedBlocks.forEach(function (block) { + blockListDoc = blockListDoc.ele(Constants.BlobConstants.COMMITTED_ELEMENT) + .txt(Buffer.from(block).toString('base64')) + .up(); + }); + } + + if (_.isArray(blockListJs.UncommittedBlocks)) { + blockListJs.UncommittedBlocks.forEach(function (block) { + blockListDoc = blockListDoc.ele(Constants.BlobConstants.UNCOMMITTED_ELEMENT) + .txt(Buffer.from(block).toString('base64')) + .up(); + }); + } + + return blockListDoc.doc().toString(); +}; + +exports.parse = function (blockListXml) { + var blockListResult = {}; + + if (blockListXml.CommittedBlocks && blockListXml.CommittedBlocks.Block) { + blockListResult.CommittedBlocks = blockListXml.CommittedBlocks.Block; + if (!_.isArray(blockListResult.CommittedBlocks)) { + blockListResult.CommittedBlocks = [blockListResult.CommittedBlocks]; + } + blockListResult.CommittedBlocks.forEach(function(block) { + block.Name = Buffer.from(block.Name, 'base64').toString(); + }); + } + + if (blockListXml.UncommittedBlocks && blockListXml.UncommittedBlocks.Block) { + blockListResult.UncommittedBlocks = blockListXml.UncommittedBlocks.Block; + if (!_.isArray(blockListResult.UncommittedBlocks)) { + blockListResult.UncommittedBlocks = [blockListResult.UncommittedBlocks]; + } + blockListResult.UncommittedBlocks.forEach(function(block) { + block.Name = Buffer.from(block.Name, 'base64').toString(); + }); + } + + return blockListResult; +}; \ No newline at end of file diff --git 
a/src/node_modules/azure-storage/lib/services/blob/models/containerresult.js b/src/node_modules/azure-storage/lib/services/blob/models/containerresult.js new file mode 100644 index 0000000..b2a04b7 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/models/containerresult.js @@ -0,0 +1,138 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = require('./../../../common/common.core'); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; + +var HeaderConstants = Constants.HeaderConstants; +var BlobUtilities = require('../blobutilities'); + +/** +* Creates a new ContainerResult object. +* @class +* The ContainerResult class is used to store the container information. +* + * @property {string} name The container name. + * @property {string} publicAccessLevel The public access level. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the container was last modified. + * @property {string} requestId The request id. + * @property {object} lease The lease information. + * @property {string} lease.status The lease status. + * @property {string} lease.state The lease state. + * @property {string} lease.duration The lease duration. + * +* @constructor +* @param {string} [container] The container name. 
+* @param {string} [publicAccessLevel] The public access level. +*/ +function ContainerResult(name, publicAccessLevel) { + if (name) { + this.name = name; + } + + if (publicAccessLevel) { + this.publicAccessLevel = publicAccessLevel; + } +} + +ContainerResult.parse = function (containerXml) { + var containerResult = new ContainerResult(); + + for (var propertyName in containerXml) { + if (containerXml.hasOwnProperty(propertyName)) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getContainerProperties + azureutil.setPropertyValueFromXML(containerResult, containerXml[propertyName], true); + } else if (propertyName === 'Metadata') { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(propertyName); + containerResult[resultPropertyName] = {}; + azureutil.setPropertyValueFromXML(containerResult[resultPropertyName], containerXml[propertyName], false); + } else { + containerResult[propertyName.toLowerCase()] = containerXml[propertyName]; + } + } + } + + if (!containerResult.publicAccessLevel) { + containerResult.publicAccessLevel = BlobUtilities.BlobContainerPublicAccessType.OFF; + } + + // convert hasImmutabilityPolicy to boolean type + if (containerResult.hasImmutabilityPolicy !== undefined) { + containerResult.hasImmutabilityPolicy = (containerResult.hasImmutabilityPolicy === 'true'); + } + + // convert hasLegalHold to boolean type + if (containerResult.hasLegalHold !== undefined) { + containerResult.hasLegalHold = (containerResult.hasLegalHold === 'true'); + } + + return containerResult; +}; + +ContainerResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setContainerPropertyFromHeaders = function (containerProperty, headerProperty) { + if (!azureutil.tryGetValueChain(self, containerProperty.split('.'), null) && headers[headerProperty.toLowerCase()]) { + azureutil.setObjectInnerPropertyValue(self, containerProperty.split('.'), 
headers[headerProperty.toLowerCase()]); + } + }; + + setContainerPropertyFromHeaders('etag', HeaderConstants.ETAG); + setContainerPropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED); + setContainerPropertyFromHeaders('lease.status', HeaderConstants.LEASE_STATUS); + setContainerPropertyFromHeaders('lease.state', HeaderConstants.LEASE_STATE); + setContainerPropertyFromHeaders('lease.duration', HeaderConstants.LEASE_DURATION); + setContainerPropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID); + setContainerPropertyFromHeaders('hasImmutabilityPolicy', HeaderConstants.HAS_IMMUTABILITY_POLICY); + setContainerPropertyFromHeaders('hasLegalHold', HeaderConstants.HAS_LEGAL_HOLD); + + // convert hasImmutabilityPolicy to boolean type + if (self.hasImmutabilityPolicy !== undefined) { + self.hasImmutabilityPolicy = (self.hasImmutabilityPolicy === 'true'); + } + + // convert hasLegalHold to boolean type + if (self.hasLegalHold !== undefined) { + self.hasLegalHold = (self.hasLegalHold === 'true'); + } + + if (!self.publicAccessLevel) { + self.publicAccessLevel = BlobUtilities.BlobContainerPublicAccessType.OFF; + if (headers[HeaderConstants.BLOB_PUBLIC_ACCESS]) { + self.publicAccessLevel = headers[HeaderConstants.BLOB_PUBLIC_ACCESS]; + } + } + + if (self.publicAccessLevel === 'true') { + // The container was marked for full public read access using a version prior to 2009-09-19. + self.publicAccessLevel = BlobUtilities.BlobContainerPublicAccessType.CONTAINER; + } +}; + +/** +* The container ACL settings. +* @typedef {object} ContainerAclResult +* @extends {ContainerResult} +* @property {Object.} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. 
+*/ + +module.exports = ContainerResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/blob/models/leaseresult.js b/src/node_modules/azure-storage/lib/services/blob/models/leaseresult.js new file mode 100644 index 0000000..8d39caa --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/blob/models/leaseresult.js @@ -0,0 +1,73 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var Constants = require('./../../../common/common.core').Constants; +var HeaderConstants = Constants.HeaderConstants; + + +/** +* Creates a new LeaseResult object. +* @class +* The LeaseResult class is used to store the lease information. +* + * @property {string} container The container name. + * @property {string} blob The blob name. + * @property {string} id The lease id. + * @property {string} time Approximate time remaining in the lease period, in seconds. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the lease was last modified. + * +* @constructor +* @param {string} [container] The container name. +* @param {string} [blob] The blob name. +* @param {string} [id] The lease id. +* @param {string} [time] Approximate time remaining in the lease period, in seconds. 
+*/ +function LeaseResult(container, blob, id, time) { + if (container) { + this.container = container; + } + + if (blob) { + this.blob = blob; + } + + if (id) { + this.id = id; + } + + if (time) { + this.time = time; + } +} + +LeaseResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + if (!self['id'] && headers[HeaderConstants.LEASE_ID]) { + self['id'] = headers[HeaderConstants.LEASE_ID]; + } + + if (!self['time'] && headers[HeaderConstants.LEASE_TIME]) { + self['time'] = parseInt(headers[HeaderConstants.LEASE_TIME], 10); + } + + self['etag'] = headers[HeaderConstants.ETAG]; + self['lastModified'] = headers[HeaderConstants.LAST_MODIFIED.toLowerCase()]; +}; + +module.exports = LeaseResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/file/fileservice.browser.js b/src/node_modules/azure-storage/lib/services/file/fileservice.browser.js new file mode 100644 index 0000000..12c784b --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/file/fileservice.browser.js @@ -0,0 +1,95 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var FileService = require('./fileservice.core'); +var azureCommon = require('./../../common/common.browser'); +var extend = require('extend'); + +var azureutil = azureCommon.util; +var BrowserFileReadStream = azureCommon.BrowserFileReadStream; +var SpeedSummary = azureCommon.SpeedSummary; +var validate = azureCommon.validate; +var ChunkStreamWithStream = azureCommon.ChunkStreamWithStream; + +/** +* Uploads a file to storage from an HTML File object. If the file already exists on the service, it will be overwritten. +* (Only available in the JavaScript Client Library for Browsers) +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {File} browserFile The File object to be uploaded created by HTML File API. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. 
+* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +*/ +FileService.prototype.createFileFromBrowserFile = function (share, directory, file, browserFile, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createFileFromBrowserFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.browserFileIsValid(browserFile); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(file); + + var self = this; + this.createFile(share, directory, file, browserFile.size, options, function (error) { + if (error) { + callback(error); + } else { + var stream = new BrowserFileReadStream(browserFile); + var chunkStream = new ChunkStreamWithStream(stream, { calcContentMd5: options.storeFileContentMD5 }); + self._createFileFromChunkStream(share, directory, file, chunkStream, browserFile.size, options, callback); + } + }); + + return options.speedSummary; +}; + +module.exports = FileService; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/file/fileservice.core.js b/src/node_modules/azure-storage/lib/services/file/fileservice.core.js new file mode 100644 index 0000000..b1112cf --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/file/fileservice.core.js @@ -0,0 +1,3941 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var qs = require('querystring'); +var url = require('url'); +var util = require('util'); +var _ = require('underscore'); +var extend = require('extend'); +var path = require('path'); + +var azureCommon = require('./../../common/common.core'); +var Md5Wrapper = require('./../../common/md5-wrapper'); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var validate = azureCommon.validate; +var SpeedSummary = azureCommon.SpeedSummary; +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; + +// Constants +var Constants = azureCommon.Constants; +var FileConstants = Constants.FileConstants; +var HeaderConstants = Constants.HeaderConstants; +var HttpConstants = Constants.HttpConstants; +var QueryStringConstants = Constants.QueryStringConstants; + +// Streams +var BatchOperation = azureCommon.BatchOperation; +var SpeedSummary = azureCommon.SpeedSummary; +var ChunkAllocator = azureCommon.ChunkAllocator; +var ChunkStream = azureCommon.ChunkStream; +var ChunkStreamWithStream = azureCommon.ChunkStreamWithStream; +var FileRangeStream = require('./internal/filerangestream'); + +// Models requires +var ShareResult = require('./models/shareresult'); +var DirectoryResult = require('./models/directoryresult'); +var FileResult = require('./models/fileresult'); +var AclResult = azureCommon.AclResult; + +// Errors requires +var errors = require('../../common/errors/errors'); +var ArgumentNullError = errors.ArgumentNullError; +var ArgumentError = errors.ArgumentError; + +/** +* 
Creates a new FileService object. +* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The FileService class is used to perform operations on the Microsoft Azure File Service. +* The File Service provides storage for binary large objects, and provides functions for working with data stored in files. +* +* For more information on the File Service, as well as task focused information on using it in a Node.js application, see +* [How to Use the File Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-file-storage/). +* The following defaults can be set on the file service. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the file service. +* defaultEnableReuseSocket The default boolean value to enable socket reuse when uploading local files or streams. +* If the Node.js version is lower than 0.10.x, socket reuse will always be turned off. +* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the file service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the file service. +* defaultLocationMode The default location mode for requests made via the file service. +* parallelOperationThreadCount The number of parallel operations that may be performed when uploading a file. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the file service; true to use the +* Nagle algorithm; otherwise, false. The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). 
+* @constructor +* @extends {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {string} [endpointSuffix] The endpoint suffix. +*/ +function FileService(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix); + + FileService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._fileEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken); + + this.defaultEnableReuseSocket = Constants.DEFAULT_ENABLE_REUSE_SOCKET; + this.singleFileThresholdInBytes = FileConstants.DEFAULT_SINGLE_FILE_GET_THRESHOLD_IN_BYTES; + this.parallelOperationThreadCount = Constants.DEFAULT_PARALLEL_OPERATION_THREAD_COUNT; +} + +util.inherits(FileService, StorageServiceClient); + +// Utility methods + +/** +* Create resource name +* @ignore +* +* @param {string} share Share name +* @param {string} [directory] Directory name +* @param {string} [file] File name +* @return {string} The encoded resource name. 
+*/ +function createResourceName(share, directory, file, forSAS) { + var encode = function(name) { + if (name && !forSAS) { + name = encodeURIComponent(name); + name = name.replace(/%2F/g, '/'); + name = name.replace(/%5C/g, '/'); + name = name.replace(/\+/g, '%20'); + } + return name; + }; + + var name = share; + + if (directory) { + // if directory does not start with '/', add it + if (directory[0] !== '/') { + name += ('/'); + } + + name += encode(directory); + } + + if (file) { + // if the current path does not end with '/', add it + if (name[name.length - 1] !== '/') { + name += ('/'); + } + + name += encode(file); + } + + return path.normalize(name).replace(/\\/g, '/'); +} + +// File service methods + +/** +* Gets the properties of a storage account's File service, including Azure Storage Analytics. +* +* @this {FileService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceProperties}` will contain the properties +* and `response` will contain information related to this operation. +*/ +FileService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Sets the properties of a storage account's File service, including Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {FileService} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. +*/ +FileService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +// Share methods + +/** +* Lists a segment containing a collection of share items under the specified account. +* +* @this {FileService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.maxResults] Specifies the maximum number of shares to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the share's metadata be returned as part of the response body. (allowed values: '', 'metadata', 'snapshots' or any combination of them) +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[shares]{@link ShareResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listSharesSegmented = function (currentToken, optionsOrCallback, callback) { + this.listSharesSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of share items whose names begin with the specified prefix under the specified account. +* +* @this {FileService} +* @param {string} prefix The prefix of the share name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {string} [options.prefix] Filters the results to return only shares whose name begins with the specified prefix. +* @param {int} [options.maxResults] Specifies the maximum number of shares to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the share's metadata be returned as part of the response body. 
(allowed values: '', 'metadata', 'snapshots' or any combination of them) +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[shares]{@link ShareResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.listSharesSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listShares', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOption(QueryStringConstants.INCLUDE, options.include); + + if (!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + webResource.withQueryOption(QueryStringConstants.PREFIX, prefix); + + //options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listSharesResult = null; + + if (!responseObject.error) { + responseObject.listSharesResult = { + entries: null, + continuationToken: null + }; + responseObject.listSharesResult.entries = []; + + var shares = []; + + if (responseObject.response.body.EnumerationResults.Shares && responseObject.response.body.EnumerationResults.Shares.Share) { + shares = responseObject.response.body.EnumerationResults.Shares.Share; + if (!_.isArray(shares)) { + shares = [shares]; + } + } + + shares.forEach(function (currentShare) { + var shareResult = ShareResult.parse(currentShare); + responseObject.listSharesResult.entries.push(shareResult); + }); + + if (responseObject.response.body.EnumerationResults.NextMarker) { + responseObject.listSharesResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listSharesResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + 
responseObject.listSharesResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listSharesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a share exists on the service. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* the share information including `exists` boolean member. 
+* `response` will contain information related to this operation. +*/ +FileService.prototype.doesShareExist = function (share, optionsOrCallback, callback) { + this._doesShareExist(share, false, optionsOrCallback, callback); +}; + +/** +* Creates a new share under the specified account. +* If a share with the same name already exists, the operation fails. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {int} [options.quota] Specifies the maximum size of the share, in gigabytes. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* the share information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.createShare = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createShare', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.shareQuotaIsValid(userOptions.quota); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withHeader(HeaderConstants.SHARE_QUOTA, options.quota); + + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + + if (options.metadata) { + responseObject.shareResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a share snapshot. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the ID of the snapshot. +* `response` will contain information related to this operation. +*/ +FileService.prototype.createShareSnapshot = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createShareSnapshot', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, QueryStringConstants.SNAPSHOT); + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.snapshotId = null; + if (!responseObject.error) { + responseObject.snapshotId = responseObject.response.headers[HeaderConstants.SNAPSHOT]; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.snapshotId, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, 
null, options, processResponseCallback); +}; + +/** +* Creates a new share under the specified account if the share does not exists. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* the share information including `created` boolean member. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* FileService.createShareIfNotExists('taskshare', function(error) { +* if(!error) { +* // Share created or already existed +* } +* }); +*/ +FileService.prototype.createShareIfNotExists = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createShareIfNotExists', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesShareExist(share, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if(error){ + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createShare(share, options, function (createError, responseShare, createResponse) { + if(!createError){ + responseShare.created = true; + } + else if (createError && createError.statusCode === HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.FileErrorCodeStrings.SHARE_ALREADY_EXISTS) { + // If it was created before, there was no actual error. + createError = null; + createResponse.isSuccessful = true; + } + + callback(createError, responseShare, createResponse); + }); + } + }); +}; + +/** +* Retrieves a share and its properties from a specified account. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. + + +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. 
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* information for the share. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getShareProperties = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareProperties', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + //options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the properties for the specified share. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [properties] The share properties to set. +* @param {string|int} [properties.quota] Specifies the maximum size of the share, in gigabytes. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[share]{@link ShareResult}` will contain +* information about the share. +* `response` will contain information related to this operation. +*/ +FileService.prototype.setShareProperties = function (share, properties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setShareProperties', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.shareQuotaIsValid(userOptions.quota); + v.callback(callback); + }); + + var options = extend(true, properties, userOptions); + var resourceName = createResourceName(share); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withHeader(HeaderConstants.SHARE_QUOTA, options.quota); + + FileResult.setProperties(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + 
responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the share statistics for a share. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceStats}` will contain the stats and +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getShareStats = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareStats', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'stats'); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = ShareResult.parse(responseObject.response.body, share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + + +/** +* Returns all user-defined metadata for the share. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* information for the share. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getShareMetadata = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareMetadata', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the share's metadata. +* +* Calling the Set Share Metadata operation overwrites all existing metadata that is associated with the share. +* It's not possible to modify an individual name/value pair. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setShareMetadata = function (share, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setShareMetadata', function (v) { + v.string(share, 'share'); + v.object(metadata, 'metadata'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + webResource.addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the share's ACL. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareAclResult}` will contain +* information for the share. +* `response` will contain information related to this operation. +*/ +FileService.prototype.getShareAcl = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareAcl', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'acl'); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + responseObject.shareResult.signedIdentifiers = AclResult.parse(responseObject.response.body); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + 
this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates the share's ACL. +* +* @this {FileService} +* @param {string} share The share name. +* @param {Object.} signedIdentifiers The share ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareAclResult}` will contain +* information for the share. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setShareAcl = function (share, signedIdentifiers, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setShareAcl', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var policies = null; + if (signedIdentifiers) { + if(_.isArray(signedIdentifiers)) { + throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS); + } + policies = AclResult.serialize(signedIdentifiers); + } + + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'acl') + .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0) + .withBody(policies); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + if (signedIdentifiers) { + responseObject.shareResult.signedIdentifiers = signedIdentifiers; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Marks the specified share for deletion. +* The share and any files contained within it are later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.FileUtilities.ShareSnapshotDeleteOptions.*. 
+* @param {string} [options.shareSnapshotId] The share snapshot identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.deleteShare = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteShare', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + if (!azureutil.objectIsNull(options.shareSnapshotId) && !azureutil.objectIsNull(options.deleteSnapshots)) { + throw new ArgumentError('options', SR.INVALID_DELETE_SNAPSHOT_OPTION); + } + + var webResource = WebResource.del(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId) + .withHeader(HeaderConstants.DELETE_SNAPSHOT, options.deleteSnapshots); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified share for deletion if it exists. +* The share and any files contained within it are later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.FileUtilities.ShareSnapshotDeleteOptions.*. +* @param {string} [options.shareSnapshotId] The share snapshot identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the share exists and was deleted, or false if the share +* did not exist. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.deleteShareIfExists = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteShareIfExists', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesShareExist(share, true, options, function (error, result, response) { + if(error){ + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteShare(share, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError){ + deleted = true; + } else if (deleteError && deleteError.statuscode === HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.FileErrorCodeStrings.SHARE_NOT_FOUND) { + // If it was deleted already, there was no actual error. + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +// Directory methods + +/** +* Checks whether or not a directory exists on the service. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The share snapshot identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
* @param {int}           [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int}           [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                           The maximum execution time interval begins at the time that the client begins building the request. The maximum
*                                                           execution time is checked intermittently while performing requests, and before executing retries.
* @param {string}        [options.clientRequestId]          A string that represents the client request ID with a 1KB character limit.
* @param {bool}          [options.useNagleAlgorithm]        Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                           The default value is false.
* @param {errorOrResult} callback                           `error` will contain information
*                                                           if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
*                                                           the directory information including `exists` boolean member.
*                                                           `response` will contain information related to this operation.
*/
FileService.prototype.doesDirectoryExist = function (share, directory, optionsOrCallback, callback) {
  // Thin wrapper over the internal existence check. NOTE(review): the third
  // argument is `false` here but `true` in createDirectoryIfNotExists —
  // presumably a primary-only flag; confirm against _doesDirectoryExist's
  // signature (not visible in this chunk).
  this._doesDirectoryExist(share, directory, false, optionsOrCallback, callback);
};

/**
* Creates a new directory under the specified account.
* If a directory with the same name already exists, the operation fails.
*
* @this {FileService}
* @param {string}        share                              The share name.
* @param {string}        directory                          The directory name.
* @param {object}        [options]                          The request options.
* @param {object}        [options.metadata]                 The metadata key/value pairs.
* @param {LocationMode}  [options.locationMode]             Specifies the location mode used to decide which location the request should be sent to.
*                                                           Please see StorageUtilities.LocationMode for the possible values.
* @param {int}           [options.timeoutIntervalInMs]      The server timeout interval, in milliseconds, to use for the request.
* @param {int}           [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int}           [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
*                                                           The maximum execution time interval begins at the time that the client begins building the request. The maximum
*                                                           execution time is checked intermittently while performing requests, and before executing retries.
* @param {string}        [options.clientRequestId]          A string that represents the client request ID with a 1KB character limit.
* @param {bool}          [options.useNagleAlgorithm]        Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
*                                                           The default value is false.
* @param {errorOrResult} callback                           `error` will contain information
*                                                           if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
*                                                           the directory information.
*                                                           `response` will contain information related to this operation.
+*/ +FileService.prototype.createDirectory = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createDirectory', function (v) { + v.string(share, 'share'); + v.string(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(createResourceName(share, directory)) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory'); + + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.directoryResult = null; + if (!responseObject.error) { + responseObject.directoryResult = new DirectoryResult(directory); + responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.directoryResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a new directory under the specified account if the directory does not exists. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain +* the directory information including `created` boolean member. +* already exists. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* FileService.createDirectoryIfNotExists('taskshare', taskdirectory', function(error) { +* if(!error) { +* // Directory created or already existed +* } +* }); +*/ +FileService.prototype.createDirectoryIfNotExists = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createDirectoryIfNotExists', function (v) { + v.string(share, 'share'); + v.string(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesDirectoryExist(share, directory, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if(error){ + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createDirectory(share, directory, options, function (createError, responseDirectory, createResponse) { + if(!createError){ + responseDirectory.created = true; + } + else if (createError && createError.statusCode === HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.StorageErrorCodeStrings.RESOURCE_ALREADY_EXISTS) { + // If it was created before, there was no actual error. + createError = null; + createResponse.isSuccessful = true; + } + + callback(createError, responseDirectory, createResponse); + }); + } + }); +}; + +/** +* Retrieves a directory and its properties from a specified account. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. 
+* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain +* information for the directory. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getDirectoryProperties = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getDirectoryProperties', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(createResourceName(share, directory)) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.directoryResult = null; + if (!responseObject.error) { + responseObject.directoryResult = new DirectoryResult(directory); + responseObject.directoryResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.directoryResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified directory for deletion. The directory must be empty before it can be deleted. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +FileService.prototype.deleteDirectory = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteDirectory', function (v) { + v.string(share, 'share'); + v.string(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.del(createResourceName(share, directory)) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory'); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified directory for deletion if it exists. The directory must be empty before it can be deleted. 
+* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the directory exists and was deleted, or false if the directory +* did not exist. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.deleteDirectoryIfExists = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteDirectoryIfExists', function (v) { + v.string(share, 'share'); + v.string(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesDirectoryExist(share, directory, true, options, function(error, result, response) { + if(error){ + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteDirectory(share, directory, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError){ + deleted = true; + } else if (deleteError && deleteError.statuscode === HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.StorageErrorCodeStrings.RESOURCE_NOT_FOUND) { + // If it was deleted already, there was no actual error. + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Lists a segment containing a collection of file items in the directory. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.maxResults] Specifies the maximum number of files to return per call to Azure ServiceClient. 
This does NOT affect list size returned by this function. (maximum: 5000) +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* entries.files which contains a list of `[files]{@link FileResult}`, entries.directories which contains a list of `[directories]{@link DirectoryResult}` and the continuationToken for the next listing operation. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listFilesAndDirectoriesSegmented = function (share, directory, currentToken, optionsOrCallback, callback) { + this.listFilesAndDirectoriesSegmentedWithPrefix(share, directory, null /*prefix*/, currentToken, optionsOrCallback, callback); +}; + + +/** +* Lists a segment containing a collection of file items in the directory. 
+* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} prefix The prefix of the directory/files name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.maxResults] Specifies the maximum number of files to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* entries.files which contains a list of `[files]{@link FileResult}`, entries.directories which contains a list of `[directories]{@link DirectoryResult}` and the continuationToken for the next listing operation. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listFilesAndDirectoriesSegmentedWithPrefix = function (share, directory, prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listFilesSegmented', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(createResourceName(share, directory)) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory') + .withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + if (!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + webResource.withQueryOption(QueryStringConstants.PREFIX, prefix); + + var processResponseCallback = function (responseObject, next) { + responseObject.listResult = null; + if (!responseObject.error) { + responseObject.listResult = { + entries: null, + continuationToken: null + }; + + responseObject.listResult.entries = {}; + responseObject.listResult.entries.files = []; + responseObject.listResult.entries.directories = []; + var files = []; + var directories = []; + + // parse files + if (responseObject.response.body.EnumerationResults.Entries.File) { + files = 
responseObject.response.body.EnumerationResults.Entries.File; + if (!_.isArray(files)) { + files = [ files ]; + } + } + + files.forEach(function (currentFile) { + var fileResult = FileResult.parse(currentFile); + responseObject.listResult.entries.files.push(fileResult); + }); + + // parse directories + if (responseObject.response.body.EnumerationResults.Entries.Directory) { + directories = responseObject.response.body.EnumerationResults.Entries.Directory; + if (!_.isArray(directories)) { + directories = [ directories ]; + } + } + + directories.forEach(function (currentDirectory) { + var directoryResult = DirectoryResult.parse(currentDirectory); + responseObject.listResult.entries.directories.push(directoryResult); + }); + + // parse continuation token + if(responseObject.response.body.EnumerationResults.NextMarker) { + responseObject.listResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + responseObject.listResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Returns all user-defined metadata for the specified directory. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain +* information about the directory. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getDirectoryMetadata = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getDirectoryMetadata', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory') + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.directoryResult = null; + if (!responseObject.error) { + responseObject.directoryResult = new DirectoryResult(directory); + responseObject.directoryResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.directoryResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined metadata for the specified directory as one or more name-value pairs +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain +* information on the directory. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setDirectoryMetadata = function (share, directory, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setDirectoryMetadata', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.shareNameIsValid(share); + v.object(metadata, 'metadata'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory') + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + webResource.addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.directoryResult = null; + if (!responseObject.error) { + responseObject.directoryResult = new DirectoryResult(directory); + responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.directoryResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// File methods + +/** +* Retrieves a shared access signature token. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} [directory] The directory name. Use '' to refer to the base directory. +* @param {string} [file] The file name. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. 
+* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used). +* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @param {object} [headers] The optional header values to set for a file returned wth this SAS. +* @param {string} [headers.cacheControl] The optional value of the Cache-Control response header to be returned when this SAS is used. +* @param {string} [headers.contentType] The optional value of the Content-Type response header to be returned when this SAS is used. +* @param {string} [headers.contentEncoding] The optional value of the Content-Encoding response header to be returned when this SAS is used. +* @param {string} [headers.contentLanguage] The optional value of the Content-Language response header to be returned when this SAS is used. +* @param {string} [headers.contentDisposition] The optional value of the Content-Disposition response header to be returned when this SAS is used. +* @return {string} The shared access signature query string. Note this string does not contain the leading "?". 
+*/ +FileService.prototype.generateSharedAccessSignature = function (share, directory, file, sharedAccessPolicy, headers) { + // check if the FileService is able to generate a shared access signature + if (!this.storageCredentials || !this.storageCredentials.generateSignedQueryString) { + throw new Error(SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY); + } + + // Validate share name. File name is optional. + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.object(sharedAccessPolicy, 'sharedAccessPolicy'); + }); + + var resourceType = FileConstants.ResourceTypes.SHARE; + if (file) { + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + }); + resourceType = FileConstants.ResourceTypes.FILE; + } else { + directory = ''; // If file is not set, directory is not a part of the string to sign. + } + + if (sharedAccessPolicy.AccessPolicy) { + if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Start)) { + if (!_.isDate(sharedAccessPolicy.AccessPolicy.Start)) { + sharedAccessPolicy.AccessPolicy.Start = new Date(sharedAccessPolicy.AccessPolicy.Start); + } + + sharedAccessPolicy.AccessPolicy.Start = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Start); + } + + if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Expiry)) { + if (!_.isDate(sharedAccessPolicy.AccessPolicy.Expiry)) { + sharedAccessPolicy.AccessPolicy.Expiry = new Date(sharedAccessPolicy.AccessPolicy.Expiry); + } + + sharedAccessPolicy.AccessPolicy.Expiry = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Expiry); + } + } + + var resourceName = createResourceName(share, directory, file, true); + return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.File, resourceName, sharedAccessPolicy, null, { headers: headers, resourceType: resourceType }); +}; + +/** +* Retrieves a file or 
directory URL. +* +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} [file] The file name. File names may not start or end with the delimiter '/'. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. +* @param {string} [shareSnapshotId] The snapshot identifier of the share. +* @return {string} The formatted URL string. +* @example +* var azure = require('azure-storage'); +* var fileService = azure.createFileService(); +* var sharedAccessPolicy = { +* AccessPolicy: { +* Permissions: azure.FileUtilities.SharedAccessPermissions.READ, +* Start: startDate, +* Expiry: expiryDate +* }, +* }; +* +* var sasToken = fileService.generateSharedAccessSignature(shareName, directoryName, fileName, sharedAccessPolicy); +* var url = fileService.getUrl(shareName, directoryName, fileName, sasToken, true); +*/ +FileService.prototype.getUrl = function (share, directory, file, sasToken, primary, shareSnapshotId) { + validate.validateArgs('getUrl', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.shareNameIsValid(share); + }); + + var host; + if(!azureutil.objectIsNull(primary) && primary === false) { + host = this.host.secondaryHost; + } else { + host = this.host.primaryHost; + } + host = azureutil.trimPortFromUri(host); + if(host && host.lastIndexOf('/') !== (host.length - 1)){ + host = host + '/'; + } + + var name = createResourceName(share, directory, file); + var query = qs.parse(sasToken); + if(shareSnapshotId) { + query[QueryStringConstants.SHARE_SNAPSHOT] = shareSnapshotId; + } + return url.resolve(host, url.format({pathname: this._getPath(name), query: query})); +}; + +/** +* Returns all user-defined metadata, standard HTTP properties, and system properties for the file. 
+* It does not return or modify the content of the file. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getFileProperties = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getFileProperties', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Returns all user-defined metadata for the specified file. +* It does not modify or return the content of the file. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. 
+* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getFileMetadata = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getFileMetadata', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined properties for the specified file. +* It does not modify or return the content of the file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [properties] The file properties to set. +* @param {string} [properties.contentType] The MIME content type of the file. The default type is application/octet-stream. 
+* @param {string} [properties.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [properties.contentLanguage] The natural languages used by this resource. +* @param {string} [properties.cacheControl] The file's cache control. +* @param {string} [properties.contentDisposition] The file's content disposition. +* @param {string} [properties.contentLength] Resizes a file to the specified size. If the specified byte value is less than the current size of the file, +* then all ranges above the specified byte value are cleared. +* @param {string} [properties.contentMD5] The file's MD5 hash. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. 
+* `response` will contain information related to this operation. +*/ +FileService.prototype.setFileProperties = function (share, directory, file, properties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setFileProperties', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {contentSettings: properties, contentLength: properties.contentLength }, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties'); + + FileResult.setProperties(webResource, options); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined metadata for the specified file as one or more name-value pairs +* It does not modify or return the content of the file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information on the file. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setFileMetadata = function (share, directory, file, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setFileMetadata', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.object(metadata, 'metadata'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + webResource.addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Resizes a file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {String} size The size of the file, in bytes. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. +* `response` will contain information related to this operation. +*/ +FileService.prototype.resizeFile = function (share, directory, file, size, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('resizeFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.value(size); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties'); + + webResource.withHeader(HeaderConstants.FILE_CONTENT_LENGTH, size); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = 
function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a file exists on the service. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the file information including the `exists` boolean member. +* `response` will contain information related to this operation. +*/ +FileService.prototype.doesFileExist = function (share, directory, file, optionsOrCallback, callback) { + this._doesFileExist(share, directory, file, false, optionsOrCallback, callback); +}; + +/** +* Creates a file of the specified length. If the file already exists on the service, it will be overwritten. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {int} length The length of the file in bytes. +* @param {object} [options] The request options. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.createFile = function (share, directory, file, length, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + validate.validateArgs('createFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.value(length); + v.callback(callback); + }); + + var resourceName = createResourceName(share, directory, file); + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.TYPE, 'file') + .withHeader(HeaderConstants.FILE_CONTENT_LENGTH, length); + + FileResult.setProperties(webResource, options); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified file for deletion. The file is later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; `response` will contain information related to this operation. 
+*/ +FileService.prototype.deleteFile = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.del(resourceName); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified file for deletion if it exists. The file is later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the file was deleted, or false if the file +* does not exist. +* `response` will contain information related to this operation. +*/ +FileService.prototype.deleteFileIfExists = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteFileIfExists', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesFileExist(share, directory, file, true, options, function(error, result, response) { + if(error){ + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteFile(share, directory, file, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError){ + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.FileErrorCodeStrings.FILE_NOT_FOUND) { + // If it was deleted already, there was no actual error. 
+ deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Downloads a file into a text string. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {FileService~FileToText} callback `error` will contain information +* if an error occurs; otherwise `text` will contain the file contents, +* and `[file]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +*/ +FileService.prototype.getFileToText = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getFileToText', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId) + .withRawResponse(); + + FileResult.setHeaders(webResource, options); + this._setRangeContentMD5Header(webResource, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.text = null; + responseObject.fileResult = null; + + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.text = responseObject.response.body; + + self._validateLengthAndMD5(options, responseObject); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.text, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Provides a stream to read from a 
file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {Readable} A Node.js Readable stream. +* @example +* var azure = require('azure-storage'); +* var fileService = azure.createFileService(); +* var writable = fs.createWriteStream(destinationFileNameTarget); +* fileService.createReadStream(shareName, directoryName, fileName).pipe(writable); +*/ +FileService.prototype.createReadStream = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createReadStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + }); + + var options = extend(true, {}, userOptions); + + var readStream = new ChunkStream(options); + this.getFileToStream(share, directory, file, readStream, options, function (error, fileResponse, response) { + if(error) { + readStream.emit('error', error); + } + + if(callback) { + callback(error, fileResponse, response); + } + }); + + return readStream; +}; + +/** +* Downloads a file into a stream. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small files. 
+* Parallel download and speed summary won't work with this option on. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information if an error occurs; 
+* otherwise `[result]{@link FileResult}` will contain the file information. 
+* `response` will contain information related to this operation. 
+* @return {SpeedSummary} 
+* 
+* 
+* @example 
+* var azure = require('azure-storage'); 
+* var FileService = azure.createFileService(); 
+* FileService.getFileToStream('taskshare', 'taskdirectory', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverFile) { 
+* if(!error) { 
+* // file available in serverFile.file variable 
+* } 
+* }); 
+*/ 
+FileService.prototype.getFileToStream = function (share, directory, file, writeStream, optionsOrCallback, callback) { 
+ var userOptions; 
+ azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); 
+ userOptions.speedSummary = userOptions.speedSummary || new SpeedSummary(file); 
+ 
+ validate.validateArgs('getFileToStream', function (v) { 
+ v.string(share, 'share'); 
+ v.stringAllowEmpty(directory, 'directory'); 
+ v.string(file, 'file'); 
+ v.object(writeStream, 'writeStream'); 
+ v.shareNameIsValid(share); 
+ v.callback(callback); 
+ }); 
+ 
+ var options = extend(true, {}, userOptions); 
+ 
+ var propertiesRequestOptions = { 
+ timeoutIntervalInMs : options.timeoutIntervalInMs, 
+ clientRequestTimeoutInMs : options.clientRequestTimeoutInMs, 
+ accessConditions : options.accessConditions, 
+ shareSnapshotId : options.shareSnapshotId 
+ }; 
+ 
+ if (options.skipSizeCheck) { 
+ this._getFileToStream(share, directory, file, writeStream, options, callback); 
+ } else { 
+ var self = this; 
+ this.getFileProperties(share, directory, file, propertiesRequestOptions, function (error, properties) { 
+ if (error) { 
+ callback(error); 
+ } else { 
+ var size; 
+ if (options.rangeStart) { 
+ var endOffset = properties.contentLength - 1; 
+ var end = options.rangeEnd ? 
Math.min(options.rangeEnd, endOffset) : endOffset; + size = end - options.rangeStart + 1; + } else { + size = properties.contentLength; + } + options.speedSummary.totalSize = size; + + if (size > self.singleFileThresholdInBytes) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(properties, ['contentSettings', 'contentMD5'], null)); + self._getFileToRangeStream(share, directory, file, writeStream, options, callback); + } else { + self._getFileToStream(share, directory, file, writeStream, options, callback); + } + } + }); + } + + return options.speedSummary; +}; + +/** +* Lists file ranges. Lists all of the ranges by default, or only the ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the range information. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listRanges = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listRanges', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var resourceName = createResourceName(share, directory, file); + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'rangelist') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + FileResult.setHeaders(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.ranges = null; + if (!responseObject.error) { + responseObject.ranges = []; + + var ranges = []; + if (responseObject.response.body.Ranges.Range) { + ranges = responseObject.response.body.Ranges.Range; + + if (!_.isArray(ranges)) { + ranges = [ ranges ]; + } + } + + ranges.forEach(function (fileRange) { + var range = { + start: parseInt(fileRange.Start, 10), + end: parseInt(fileRange.End, 10) + }; + + responseObject.ranges.push(range); + }); + } + + var finalCallback 
= function (returnObject) { + callback(returnObject.error, returnObject.ranges, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Clears a range. Clears all of the ranges by default, or only the ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information 
+* if an error occurs; otherwise `[result]{@link FileResult}` will contain 
+* the file information. 
+* `response` will contain information related to this operation. 
+*/ 
+FileService.prototype.clearRange = function (share, directory, file, rangeStart, rangeEnd, optionsOrCallback, callback) { 
+ var userOptions; 
+ azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); 
+ 
+ validate.validateArgs('clearRange', function (v) { 
+ v.string(share, 'share'); 
+ v.stringAllowEmpty(directory, 'directory'); 
+ v.string(file, 'file'); 
+ v.shareNameIsValid(share); 
+ v.value(rangeStart); 
+ v.value(rangeEnd); 
+ v.callback(callback); 
+ }); 
+ 
+ var options = extend(true, {}, userOptions); 
+ var request = this._updateFilesImpl(share, directory, file, rangeStart, rangeEnd, FileConstants.RangeWriteOptions.CLEAR, options); 
+ 
+ var processResponseCallback = function(responseObject, next) { 
+ responseObject.fileResult = null; 
+ if (!responseObject.error) { 
+ responseObject.fileResult = new FileResult(share, directory, file); 
+ responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); 
+ } 
+ 
+ var finalCallback = function(returnObject) { 
+ callback(returnObject.error, returnObject.fileResult, returnObject.response); 
+ }; 
+ 
+ next(responseObject, finalCallback); 
+ }; 
+ 
+ this.performRequest(request, null, options, processResponseCallback); 
+}; 
+ 
+/** 
+* Updates a range from a stream. 
+* 
+* @this {FileService} 
+* @param {string} share The share name. 
+* @param {string} directory The directory name. Use '' to refer to the base directory. 
+* @param {string} file The file name. File names may not start or end with the delimiter '/'. 
+* @param {Readable} readStream The Node.js Readable stream. 
+* @param {int} rangeStart The range start. 
+* @param {int} rangeEnd The range end. 
+* @param {object} [options] The request options. 
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. 
+* @param {string} [options.transactionalContentMD5] An optional hash value used to ensure transactional integrity for the file range. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum 
+* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information 
+* if an error occurs; otherwise `[result]{@link FileResult}` will contain 
+* the file information. 
+* `response` will contain information related to this operation. 
+*/ +FileService.prototype.createRangesFromStream = function (share, directory, file, readStream, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createRangesFromStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(readStream, 'readStream'); + v.shareNameIsValid(share); + v.value(rangeStart); + v.value(rangeEnd); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var requiresContentMD5 = azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5 === true; + + var length = (rangeEnd - rangeStart) + 1; + if(length > FileConstants.MAX_UPDATE_FILE_SIZE) { + throw new Error(SR.INVALID_FILE_RANGE_FOR_UPDATE); + } + + var self = this; + if (requiresContentMD5) { + azureutil.calculateMD5(readStream, length, options, function(internalBuff, contentMD5) { + options.transactionalContentMD5 = contentMD5; + self._createRanges(share, directory, file, internalBuff, null /* stream */, rangeStart, rangeEnd, options, callback); + }); + } else { + self._createRanges(share, directory, file, null /* text */, readStream, rangeStart, rangeEnd, options, callback); + } +}; + +/** +* Uploads a file from a text string. If the file already exists on the service, it will be overwritten. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string|object} text The file text, as a string or in a Buffer. +* @param {object} [options] The request options. 
+* @param {SpeedSummary} [options.speedSummary] The upload tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information 
+* if an error occurs; otherwise `[result]{@link FileResult}` will contain 
+* the file information. 
+* `response` will contain information related to this operation. 
+* @example 
+* var azure = require('azure-storage'); 
+* var fileService = azure.createFileService(); 
+* 
+* var text = 'Hello World!'; 
+* 
+* fileService.createFileFromText('taskshare', 'taskdirectory', 'taskfile', text, function(error, result, response) { 
+* if (!error) { 
+* // file created 
+* } 
+* }); 
+*/ 
+FileService.prototype.createFileFromText = function (share, directory, file, text, optionsOrCallback, callback) { 
+ var userOptions; 
+ azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); 
+ 
+ validate.validateArgs('createFileFromText', function (v) { 
+ v.string(share, 'share'); 
+ v.stringAllowEmpty(directory, 'directory'); 
+ v.string(file, 'file'); 
+ v.shareNameIsValid(share); 
+ v.callback(callback); 
+ }); 
+ 
+ var options = extend(true, {}, userOptions); 
+ var length = azureutil.objectIsNull(text) ? 0 : ((Buffer.isBuffer(text) ? 
text.length : Buffer.byteLength(text))); + if (length > FileConstants.MAX_UPDATE_FILE_SIZE) { + throw new Error(SR.INVALID_FILE_LENGTH); + } + + if(options.storeFileContentMD5 && azureutil.objectIsNull(azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null))) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.getContentMd5(text)); + } + + var self = this; + this.createFile(share, directory, file, length, options, function(error, fileResult, response) { + if(error || length === 0) { + callback(error, fileResult, response); + } + else { + self._createRanges(share, directory, file, text, null, 0, length - 1, options, callback); + } + }); +}; + +/** +* Uploads a file from a stream. If the file already exists on the service, it will be overwritten. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. 
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +* @example +* var stream = require('stream'); +* var azure = require('azure-storage'); +* var fileService = azure.createFileService(); +* +* var fileStream = new stream.Readable(); +* fileStream.push(myFileBuffer); +* fileStream.push(null); +* +* fileService.createFileFromStream('taskshare', 'taskdirectory', 'taskfile', fileStream, myFileBuffer.length, function(error, result, response) { +* if (!error) { +* // file uploaded +* } +* }); +*/ +FileService.prototype.createFileFromStream = function(share, directory, file, stream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createFileFromStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(stream, 'stream'); + v.value(streamLength, 'streamLength'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(file); + + stream.pause(); // Immediately pause the stream in order to compatible with Node v0.8 + + var self = this; + this.createFile(share, directory, file, streamLength, options, function(error) { + if(error) { + callback(error); + } else { + var chunkStream = new ChunkStreamWithStream(stream, {calcContentMd5: options.storeFileContentMD5}); + self._createFileFromChunkStream(share, directory, file, chunkStream, streamLength, options, callback); + } + }); + + return options.speedSummary; +}; + +/** +* Provides a stream to write to a file. Assumes that the file exists. +* If it does not, please create the file using createFile before calling this method or use createWriteStreamNewFile. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {FileService} +* @param {string} share The share name. 
+* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* FileService.createFile(shareName, directoryName, fileName, 1024, function (err) { +* // Pipe file to a file +* var stream = fs.createReadStream(fileNameTarget).pipe(FileService.createWriteStreamToExistingFile(shareName, directoryName, fileName)); +* }); +*/ +FileService.prototype.createWriteStreamToExistingFile = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createWriteStreamToExistingFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + }); + + var options = extend(true, {}, userOptions); + + var stream = new ChunkStream({calcContentMd5: options.storeFileContentMD5}); + this._createFileFromChunkStream(share, directory, file, stream, null, options, function (error, file, response) { + if(error) { + stream.emit('error', error); + } + + if (callback) { + callback(error, file, response); + } + }); + + return stream; +}; + 
+/** +* Provides a stream to write to a file. Creates the file before writing data. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} length The file length. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. 
+* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(FileService.createWriteStreamToNewFile(shareName, directoryName, fileName)); +*/ +FileService.prototype.createWriteStreamToNewFile = function (share, directory, file, length, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createWriteStreamToNewFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.value(length, 'length'); + v.shareNameIsValid(share); + }); + + var options = extend(true, {}, userOptions); + + var stream = new ChunkStream({calcContentMd5: options.storeFileContentMD5}); + stream.pause(); + + var self = this; + this.createFile(share, directory, file, length, options, function(error) { + if(error) { + stream.emit('error', error); + callback(error); + } + else { + stream.resume(); + self._createFileFromChunkStream(share, directory, file, stream, null, options, function (error, file, response) { + if(error) { + stream.emit('error', error); + } + + if (callback) { + callback(error, file, response); + } + }); + } + }); + + return stream; +}; + +/** +* Starts to copy a file to a destination within the storage account. +* +* @this {FileService} +* @param {string} sourceUri The source file or blob URI. +* @param {string} targetShare The target share name. +* @param {string} targetDirectory The target directory name. +* @param {string} targetFile The target file name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The target file metadata key/value pairs. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {AccessConditions} [options.sourceAccessConditions] The source access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.startCopyFile = function (sourceUri, targetShare, targetDirectory, targetFile, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('startCopyFile', function (v) { + v.string(targetShare, 'targetShare'); + v.stringAllowEmpty(targetDirectory, 'targetDirectory'); + v.string(targetFile, 'targetFile'); + v.shareNameIsValid(targetShare); + v.callback(callback); + }); + + var targetResourceName = createResourceName(targetShare, targetDirectory, targetFile); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.put(targetResourceName) + .withHeader(HeaderConstants.COPY_SOURCE, sourceUri) + .addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(targetShare, targetDirectory, targetFile); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + + if (options.metadata) { + responseObject.fileResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Abort a file copy operation. +* +* @this {FileService} +* @param {string} share The destination share name. +* @param {string} directory The destination directory name. +* @param {string} file The destination file name. +* @param {string} copyId The copy operation identifier. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.abortCopyFile = function (share, directory, file, copyId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('abortCopyFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var resourceName = createResourceName(share, directory, file); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COPY_ID, copyId) + .withQueryOption(QueryStringConstants.COMP, 'copy') + .withHeader(HeaderConstants.COPY_ACTION, 'abort'); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Internal Methods + +/** +* Updates a file from text. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} text The text string. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {bool} [options.transactionalContentMD5] An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, file, response)} callback `error` will contain information +* if an error occurs; otherwise `file` will contain +* the file information. +* `response` will contain information related to this operation. +*/ +FileService.prototype._createRanges = function (share, directory, file, text, readStream, rangeStart, rangeEnd, options, callback) { + var request = this._updateFilesImpl(share, directory, file, rangeStart, rangeEnd, FileConstants.RangeWriteOptions.UPDATE, options); + + // At this point, we have already validated that the range is less than 4MB. Therefore, we just need to calculate the contentMD5 if required. 
+ if(!azureutil.objectIsNull(text) && azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5 === true) { + request.withHeader(HeaderConstants.CONTENT_MD5, azureutil.getContentMd5(text)); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if(!azureutil.objectIsNull(text)) { + this.performRequest(request, text, options, processResponseCallback); + } else { + this.performRequestOutputStream(request, readStream, options, processResponseCallback); + } +}; + +/** +* Uploads a file from a stream. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object|function} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. 
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the file content. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {function(error, null)} callback The callback function. 
+* @return {SpeedSummary} +*/ +FileService.prototype._createFileFromChunkStream = function(share, directory, file, chunkStream, streamLength, options, callback) { + this.logger.debug(util.format('_createFileFromChunkStream for file %s', file)); + + var apiName = '_createRanges'; + var sizeLimitation = FileConstants.DEFAULT_WRITE_SIZE_IN_BYTES; + var originalContentMD5 = azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null); + + this._setOperationExpiryTime(options); + + // initialize the speed summary + var speedSummary = options.speedSummary || new SpeedSummary(); + speedSummary.totalSize = streamLength; + + var parallelOperationThreadCount = options.parallelOperationThreadCount || this.parallelOperationThreadCount; + + // initialize chunk allocator + var allocator = new ChunkAllocator(sizeLimitation, parallelOperationThreadCount, { logger: this.logger }); + + // if this is a FileReadStream, set the allocator on that stream + if (chunkStream._stream && chunkStream._stream.setMemoryAllocator) { + chunkStream._stream.setMemoryAllocator(allocator); + } + + // initialize batch operations + var batchOperations = new BatchOperation(apiName, { logger : this.logger, enableReuseSocket : this.defaultEnableReuseSocket}); + batchOperations.setConcurrency(parallelOperationThreadCount); + + // initialize options + var rangeOptions = { + timeoutIntervalInMs: options.timeoutIntervalInMs, + clientRequestTimeoutInMs: options.clientRequestTimeoutInMs, + operationExpiryTime: options.operationExpiryTime + }; + + var self = this; + chunkStream.on('data', function (data, range) { + var operation = null; + var full = false; + var autoIncrement = speedSummary.getAutoIncrementFunction(data.length); + + if(data.length > sizeLimitation) { + throw new Error(util.format(SR.EXCEEDED_SIZE_LIMITATION, sizeLimitation, data.length)); + } + + if (options.useTransactionalMD5) { + //calculate content md5 for the current uploading block data + var contentMD5 = 
azureutil.getContentMd5(data); + rangeOptions.transactionalContentMD5 = contentMD5; + } + + if (azureutil.isBufferAllZero(data)) { + self.logger.debug(util.format('Skip upload data from %s bytes to %s bytes to file %s', range.start, range.end, file)); + speedSummary.increment(data.length); + } else { + operation = new BatchOperation.RestOperation(self, apiName, share, directory, file, data, null, range.start, range.end, rangeOptions, function (error) { + if(!error) { + autoIncrement(); + } else { + self.logger.debug(util.format('Stop downloading data as error happens. Error: %s', util.inspect(error))); + chunkStream.stop(); + } + + allocator.releaseBuffer(data); + data = null; + }); + } + + if (operation) { + full = batchOperations.addOperation(operation); + operation = null; + + if(full) { + self.logger.debug('file stream paused'); + chunkStream.pause(); + } + } + }); + + chunkStream.on('end', function () { + self.logger.debug(util.format('File read stream ended for file %s', file)); + batchOperations.enableComplete(); + }); + + batchOperations.on('drain', function () { + self.logger.debug('File stream resume'); + chunkStream.resume(); + }); + + batchOperations.on('end', function (error) { + self.logger.debug('batch operations commited'); + + if (error) { + callback(error); + return; + } + + if (originalContentMD5) { + options.contentSettings.contentMD5 = originalContentMD5; + } else if (options.storeFileContentMD5) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], chunkStream.getContentMd5('base64')); + } + + // upload file completely + var fileProperties = extend(false, options.contentSettings, { contentLength: options.streamLength }); + self.setFileProperties(share, directory, file, fileProperties, function (error, file, response) { + chunkStream.finish(); + callback(error, file, response); + }); + }); + + return speedSummary; +}; + +/** +* Downloads a file into a stream. 
+* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the file information. +* `response` will contain information related to this operation. +* +* @return {SpeedSummary} +*/ +FileService.prototype._getFileToStream = function (share, directory, file, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_getFileToStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(writeStream, 'writeStream'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId) + .withRawResponse(); + + FileResult.setHeaders(webResource, options); + this._setRangeContentMD5Header(webResource, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + + self._validateLengthAndMD5(options, responseObject); + + if (options.speedSummary) { + options.speedSummary.increment(responseObject.length); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequestInputStream(webResource, null, writeStream, options, 
processResponseCallback); +}; + +/** +* Downloads a file into a range stream. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the file information. +* `response` will contain information related to this operation. +* +* @return {SpeedSummary} +*/ +FileService.prototype._getFileToRangeStream = function (share, directory, file, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_getFileToRangeStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(writeStream, 'writeStream'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var speedSummary = userOptions.speedSummary || new SpeedSummary(file); + var parallelOperationThreadCount = userOptions.parallelOperationThreadCount || this.parallelOperationThreadCount; + var batchOperations = new BatchOperation('getfile', { callbackInOrder: true, logger : this.logger, enableReuseSocket : this.defaultEnableReuseSocket }); + batchOperations.setConcurrency(parallelOperationThreadCount); + + var rangeStream = new FileRangeStream(this, share, directory, file, userOptions); + + var self = this; + var checkMD5sum = !userOptions.disableContentMD5Validation; + var md5Hash = null; + if (checkMD5sum) { + md5Hash = new Md5Wrapper().createMd5Hash(); + } + + var savedFileResult = null; + var savedFileResponse = null; + + rangeStream.on('range', function (range) { + if (!speedSummary.totalSize) { + speedSummary.totalSize = rangeStream.rangeSize; + } + + var requestOptions = { + rangeStart : range.start, + rangeEnd : range.end, + responseEncoding : null //Use Buffer to store the response data + }; + + var rangeSize = range.size; + 
requestOptions.shareSnapshotId = userOptions.shareSnapshotId; + requestOptions.timeoutIntervalInMs = userOptions.timeoutIntervalInMs; + requestOptions.clientRequestTimeoutInMs = userOptions.clientRequestTimeoutInMs; + requestOptions.useTransactionalMD5 = userOptions.useTransactionalMD5; + + if (range.dataSize === 0) { + var autoIncrement = speedSummary.getAutoIncrementFunction(rangeSize); + //No operation to do and only wait for write zero to file in callback + var writeZeroOperation = new BatchOperation.CommonOperation(BatchOperation.noOperation, function (error) { + if (error) return; + var bufferAvailable = azureutil.writeZerosToStream(writeStream, rangeSize, md5Hash, autoIncrement); + //There is no need to pause the rangestream since we can perform http request and write disk at the same time + self.logger.debug(util.format('Write %s bytes Zero from %s to %s', rangeSize, range.start, range.end)); + if (!bufferAvailable) { + self.logger.debug('Write stream is full and pause batch operation'); + batchOperations.pause(); + } + }); + batchOperations.addOperation(writeZeroOperation); + return; + } + + if (range.start > range.end) { + return; + } + + var operation = new BatchOperation.RestOperation(self, 'getFileToText', share, directory, file, requestOptions, function (error, content, fileResult, response) { + if (!error) { + if (rangeSize !== content.length) { + self.logger.warn(util.format('Request %s bytes, but server returns %s bytes', rangeSize, content.length)); + } + //Save one of the succeeded callback parameters and use them at the final callback + if (!savedFileResult) { + savedFileResult = fileResult; + } + if (!savedFileResponse) { + savedFileResponse = response; + } + var autoIncrement = speedSummary.getAutoIncrementFunction(content.length); + var bufferAvailable = writeStream.write(content, autoIncrement); + if (!bufferAvailable) { + self.logger.debug('Write stream is full and pause batch operation'); + batchOperations.pause(); + } + if (md5Hash) { + 
md5Hash.update(content); + } + content = null; + } else { + self.logger.debug(util.format('Stop downloading data as error happens. Error: %s', util.inspect(error))); + rangeStream.stop(); + } + }); + + var full = batchOperations.addOperation(operation); + if (full) { + self.logger.debug('Pause range stream'); + rangeStream.pause(); + } + }); + + rangeStream.on('end', function () { + self.logger.debug('Range stream has ended.'); + batchOperations.enableComplete(); + }); + + batchOperations.on('drain', function () { + self.logger.debug('Resume range stream'); + rangeStream.resume(); + }); + + writeStream.on('drain', function () { + self.logger.debug('Resume batch operations'); + batchOperations.resume(); + }); + + batchOperations.on('end', function (error) { + self.logger.debug('Download completed!'); + if (error) { + callback(error); + } else { + writeStream.end(function () { + self.logger.debug('Write stream has ended'); + if (!savedFileResult) { + savedFileResult = {}; + } + azureutil.setObjectInnerPropertyValue(savedFileResult, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(userOptions, ['contentSettings', 'contentMD5'], null)); + savedFileResult.clientSideContentMD5 = null; + if (md5Hash) { + savedFileResult.clientSideContentMD5 = md5Hash.digest('base64'); + } + callback(error, savedFileResult, savedFileResponse); + }); + } + }); + + var listOptions = { + timeoutIntervalInMs : userOptions.timeoutIntervalInMs, + clientRequestTimeoutInMs : userOptions.clientRequestTimeoutInMs, + }; + + rangeStream.list(listOptions); + return speedSummary; +}; + +/** +* @ignore +*/ +FileService.prototype._setRangeContentMD5Header = function (webResource, options) { + if(!azureutil.objectIsNull(options.rangeStart) && options.useTransactionalMD5) { + if(azureutil.objectIsNull(options.rangeEnd)) { + throw new ArgumentNullError(util.format(SR.ARGUMENT_NULL_OR_EMPTY, options.rangeEndHeader)); + } + + var size = parseInt(options.rangeEnd, 10) - parseInt(options.rangeStart, 
10) + 1; + if (size > FileConstants.MAX_RANGE_GET_SIZE_WITH_MD5) { + throw new Error(SR.INVALID_RANGE_FOR_MD5); + } else { + webResource.withHeader(HeaderConstants.RANGE_GET_CONTENT_MD5, 'true'); + } + } +}; + +/** +* @ignore +*/ +FileService.prototype._updateFilesImpl = function (share, directory, file, rangeStart, rangeEnd, writeMethod, options) { + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'range') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/octet-stream') + .withHeader(HeaderConstants.FILE_WRITE, writeMethod); + + options.rangeStart = rangeStart; + options.rangeEnd = rangeEnd; + + FileResult.setHeaders(webResource, options); + + if(writeMethod === FileConstants.RangeWriteOptions.UPDATE) { + var size = (rangeEnd - rangeStart) + 1; + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, size); + } else { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, 0); + } + + return webResource; +}; + +/** +* @ignore +*/ +FileService.prototype._validateLengthAndMD5 = function (options, responseObject) { + var storedMD5 = responseObject.response.headers[Constants.HeaderConstants.CONTENT_MD5]; + var contentLength; + + if (!azureutil.objectIsNull(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH])) { + contentLength = parseInt(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH], 10); + } + + // If the user has not specified this option, the default value should be false. + if(azureutil.objectIsNull(options.disableContentMD5Validation)) { + options.disableContentMD5Validation = false; + } + + // None of the below cases should be retried. So set the error in every case so the retry policy filter handle knows that it shouldn't be retried. 
+ if (options.disableContentMD5Validation === false && options.useTransactionalMD5 === true && azureutil.objectIsNull(storedMD5)) { + responseObject.error = new Error(SR.MD5_NOT_PRESENT_ERROR); + responseObject.retryable = false; + } + + // Validate length and if required, MD5. + // If getFileToText called this method, then the responseObject.length and responseObject.contentMD5 are not set. Calculate them first using responseObject.response.body and then validate. + if(azureutil.objectIsNull(responseObject.length)) { + if (typeof responseObject.response.body == 'string') { + responseObject.length = Buffer.byteLength(responseObject.response.body); + } else if (Buffer.isBuffer(responseObject.response.body)) { + responseObject.length = responseObject.response.body.length; + } + } + + if(!azureutil.objectIsNull(contentLength) && responseObject.length !== contentLength) { + responseObject.error = new Error(SR.CONTENT_LENGTH_MISMATCH); + responseObject.retryable = false; + } + + if(options.disableContentMD5Validation === false && azureutil.objectIsNull(responseObject.contentMD5)) { + responseObject.contentMD5 = azureutil.getContentMd5(responseObject.response.body); + } + + if (options.disableContentMD5Validation === false && !azureutil.objectIsNull(storedMD5) && storedMD5 !== responseObject.contentMD5) { + responseObject.error = new Error(util.format(SR.HASH_MISMATCH, storedMD5, responseObject.contentMD5)); + responseObject.retryable = false; + } +}; + +/** +* Checks whether or not a file exists on the service. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. 
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the file information including the `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype._doesFileExist = function (share, directory, file, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('FileExists', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + /*if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + }*/ + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = new FileResult(share, directory, file); + if (!responseObject.error) { + responseObject.fileResult.exists = true; + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.fileResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a directory exists on the service. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. 
/**
* Checks whether or not a directory exists on the service.
* @ignore
*
* @this {FileService}
* @param {string} share The share name.
* @param {string} directory The directory name. Use '' to refer to the base directory.
* @param {string} primaryOnly If true, the request will be executed against the primary storage location.
* @param {object} [options] The request options.
* @param {string} [options.shareSnapshotId] The share snapshot identifier.
* @param {Function(error, result, response)} callback `error` will contain information
*                                            if an error occurs; otherwise `result` will contain
*                                            the directory information including the `exists` boolean member.
*                                            `response` will contain information related to this operation.
*/
FileService.prototype._doesDirectoryExist = function (share, directory, primaryOnly, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { suppliedOptions = o; callback = c; });

  validate.validateArgs('directoryExists', function (v) {
    v.string(share, 'share');
    v.stringAllowEmpty(directory, 'directory');
    v.shareNameIsValid(share);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);

  // HEAD with restype=directory probes the directory without transferring content.
  var headRequest = WebResource.head(createResourceName(share, directory))
    .withQueryOption(QueryStringConstants.RESTYPE, 'directory')
    .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId);

  // NOTE(review): primaryOnly is currently unused; secondary-location routing is disabled upstream.
  /*if(primaryOnly === false) {
    options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;
  }*/

  var self = this;
  var handleResponse = function (responseObject, next) {
    responseObject.directoryResult = new DirectoryResult(directory);
    responseObject.directoryResult.exists = false;

    if (!responseObject.error) {
      responseObject.directoryResult.exists = true;
      responseObject.directoryResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
      responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers);
    } else if (responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) {
      // A 404 means "does not exist" — report it as a successful lookup.
      responseObject.error = null;
      responseObject.response.isSuccessful = true;
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.directoryResult, returnObject.response);
    });
  };

  this.performRequest(headRequest, null, options, handleResponse);
};
/**
* Checks whether or not a share exists on the service.
* @ignore
*
* @this {FileService}
* @param {string} share The share name.
* @param {string} primaryOnly If true, the request will be executed against the primary storage location.
* @param {object} [options] The request options.
* @param {string} [options.shareSnapshotId] The share snapshot identifier.
* @param {Function(error, result, response)} callback `error` will contain information
*                                            if an error occurs; otherwise `result` will contain
*                                            the share information including the `exists` boolean member.
*                                            `response` will contain information related to this operation.
*/
FileService.prototype._doesShareExist = function (share, primaryOnly, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { suppliedOptions = o; callback = c; });

  validate.validateArgs('shareExists', function (v) {
    v.string(share, 'share');
    v.shareNameIsValid(share);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);

  // HEAD with restype=share probes the share without transferring content.
  var headRequest = WebResource.head(share)
    .withQueryOption(QueryStringConstants.RESTYPE, 'share')
    .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId);

  // NOTE(review): primaryOnly is currently unused; secondary-location routing is disabled upstream.
  /*if(primaryOnly === false) {
    options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;
  }*/

  var handleResponse = function (responseObject, next) {
    responseObject.shareResult = new ShareResult(share);
    responseObject.shareResult.exists = false;

    if (!responseObject.error) {
      responseObject.shareResult.exists = true;
      responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers);
    } else if (responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) {
      // A 404 means "does not exist" — report it as a successful lookup.
      responseObject.error = null;
      responseObject.response.isSuccessful = true;
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.shareResult, returnObject.response);
    });
  };

  this.performRequest(headRequest, null, options, handleResponse);
};
+*/ + +FileService.SpeedSummary = SpeedSummary; + +module.exports = FileService; diff --git a/src/node_modules/azure-storage/lib/services/file/fileservice.node.js b/src/node_modules/azure-storage/lib/services/file/fileservice.node.js new file mode 100644 index 0000000..44af80e --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/file/fileservice.node.js @@ -0,0 +1,182 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = require('./../../common/common.node'); +var extend = require('extend'); +var fs = require('fs'); +var FileService = require('./fileservice.core'); + +var azureutil = azureCommon.util; +var FileReadStream = azureCommon.FileReadStream; +var SpeedSummary = azureCommon.SpeedSummary; +var validate = azureCommon.validate; + +/** +* Downloads an Azure file into a file. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} localFileName The local path to the file to be downloaded. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. 
/**
* Downloads an Azure file into a local file.
* (Not available in the JavaScript Client Library for Browsers)
*
* @this {FileService}
* @param {string} share The share name.
* @param {string} directory The directory name. Use '' to refer to the base directory.
* @param {string} file The file name. File names may not start or end with the delimiter '/'.
* @param {string} localFileName The local path to the file to be downloaded.
* @param {object} [options] The request options.
* @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download.
*                                          Set the option to true for small files.
*                                          Parallel download and speed summary won't work with this option on.
* @param {SpeedSummary} [options.speedSummary] The download tracker objects.
* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when downloading.
* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range.
* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range.
* @param {boolean} [options.useTransactionalMD5] When set to true, calculate and send/validate content MD5 for transactions.
* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files.
* @param {errorOrResult} callback `error` will contain information if an error occurs;
*                                 otherwise `[result]{@link FileResult}` will contain the file information.
*                                 `response` will contain information related to this operation.
* @return {SpeedSummary}
*
* @example
* var azure = require('azure-storage');
* var FileService = azure.createFileService();
* FileService.getFileToLocalFile('taskshare', 'taskdirectory', 'task1', 'task1-download.txt', function(error, serverFile) {
*   if(!error) {
*     // file available in serverFile.file variable
*   }
* });
*/
FileService.prototype.getFileToLocalFile = function (share, directory, file, localFileName, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
  userOptions.speedSummary = userOptions.speedSummary || new SpeedSummary(file);

  validate.validateArgs('getFileToLocalFile', function (v) {
    v.string(share, 'share');
    v.stringAllowEmpty(directory, 'directory');
    v.string(file, 'file');
    v.string(localFileName, 'localFileName');
    v.shareNameIsValid(share);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);

  // FIX: both the write stream's 'error' event and getFileToStream's completion could
  // previously invoke the user callback, causing a double callback on I/O errors.
  // Guard so the callback fires at most once.
  var callbackInvoked = false;
  var invokeCallbackOnce = function (error, responseFile, response) {
    if (!callbackInvoked) {
      callbackInvoked = true;
      callback(error, responseFile, response);
    }
  };

  var writeStream = fs.createWriteStream(localFileName);
  writeStream.on('error', function (error) {
    invokeCallbackOnce(error);
  });

  this.getFileToStream(share, directory, file, writeStream, options, function (error, responseFile, response) {
    if (error) {
      writeStream.end(function () {
        // If the download failed from the beginning, remove the (empty) local file.
        if (fs.existsSync(localFileName) && writeStream.bytesWritten === 0) {
          fs.unlinkSync(localFileName);
        }
        invokeCallbackOnce(error, responseFile, response);
      });
    } else {
      invokeCallbackOnce(error, responseFile, response);
    }
  });

  return options.speedSummary;
};
/**
* Uploads a file to storage from a local file. If the file already exists on the service, it will be overwritten.
* (Not available in the JavaScript Client Library for Browsers)
*
* @this {FileService}
* @param {string} share The share name.
* @param {string} directory The directory name. Use '' to refer to the base directory.
* @param {string} file The file name. File names may not start or end with the delimiter '/'.
* @param {string} localFileName The local path to the file to be uploaded.
* @param {object} [options] The request options.
* @param {SpeedSummary} [options.speedSummary] The upload tracker objects.
* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads.
*                                             The default value is false for files.
* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
* @param {object} [options.contentSettings] The file's content settings.
* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream.
* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file.
* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it.
* @param {string} [options.contentSettings.contentDisposition] The file's content disposition.
* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash.
* @param {object} [options.metadata] The metadata key/value pairs.
* @param {errorOrResult} callback `error` will contain information if an error occurs;
*                                 otherwise `[result]{@link FileResult}` will contain the file information.
*                                 `response` will contain information related to this operation.
* @return {SpeedSummary}
*/
FileService.prototype.createFileFromLocalFile = function (share, directory, file, localFileName, optionsOrCallback, callback) {
  var suppliedOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { suppliedOptions = o; callback = c; });

  validate.validateArgs('createFileFromLocalFile', function (v) {
    v.string(share, 'share');
    v.stringAllowEmpty(directory, 'directory');
    v.string(file, 'file');
    v.string(localFileName, 'localFileName');
    v.shareNameIsValid(share);
    v.callback(callback);
  });

  var options = extend(true, {}, suppliedOptions);
  options.speedSummary = options.speedSummary || new SpeedSummary(file);

  var self = this;
  fs.stat(localFileName, function (statError, stat) {
    if (statError) {
      callback(statError);
      return;
    }

    // Reserve the full file size on the service first, then stream the local content into it.
    self.createFile(share, directory, file, stat.size, options, function (createError) {
      if (createError) {
        callback(createError);
        return;
      }

      var readStream = new FileReadStream(localFileName, { calcContentMd5: options.storeFileContentMD5 });
      self._createFileFromChunkStream(share, directory, file, readStream, stat.size, options, callback);
    });
  });

  return options.speedSummary;
};

module.exports = FileService;
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'FileUtilities'. +exports = module.exports; + +/** +* Defines constants, enums, and utility functions for use with the File service. +* @namespace FileUtilities +*/ +var FileUtilities = { + /** + * Permission types + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + READ: 'r', + CREATE: 'c', + WRITE: 'w', + DELETE: 'd', + LIST: 'l' + }, + + /** + * Listing details. + * + * @const + * @enum {string} + */ + ListingDetails: { + METADATA: 'metadata' + }, + + /** + * File and share public access types. + * + * @const + * @enum {string} + */ + SharePublicAccessType: { + OFF: null, + SHARE: 'share', + FILE: 'file' + }, + + /** + * Deletion options for share snapshots + * + * @const + * @enum {string} + */ + ShareSnapshotDeleteOptions: { + SHARE_AND_SNAPSHOTS: 'include' + }, +}; + +module.exports = FileUtilities; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/file/internal/filerangestream.js b/src/node_modules/azure-storage/lib/services/file/internal/filerangestream.js new file mode 100644 index 0000000..74788da --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/file/internal/filerangestream.js @@ -0,0 +1,46 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var util = require('util'); +var RangeStream = require('./../../../common/streams/rangestream'); +var Constants = require('./../../../common/util/constants'); + +/** +* File range stream +*/ +function FileRangeStream(fileServiceClient, share, directory, file, options) { + FileRangeStream['super_'].call(this, fileServiceClient, null, null, options); + + this._lengthHeader = Constants.HeaderConstants.FILE_CONTENT_LENGTH; + if (options.minRangeSize) { + this._minRangeSize = options.minRangeSize; + } else { + this._minRangeSize = Constants.FileConstants.MIN_WRITE_FILE_SIZE_IN_BYTES; + } + if (options.maxRangeSize) { + this._maxRangeSize = options.maxRangeSize; + } else { + this._maxRangeSize = Constants.FileConstants.DEFAULT_WRITE_SIZE_IN_BYTES; + } + this._listFunc = fileServiceClient.listRanges; + this._resourcePath.push(share); + this._resourcePath.push(directory); + this._resourcePath.push(file); +} + +util.inherits(FileRangeStream, RangeStream); + +module.exports = FileRangeStream; diff --git a/src/node_modules/azure-storage/lib/services/file/models/directoryresult.js b/src/node_modules/azure-storage/lib/services/file/models/directoryresult.js new file mode 100644 index 0000000..cefbc52 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/file/models/directoryresult.js @@ -0,0 +1,58 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Module dependencies.
var HeaderConstants = require('./../../../common/common.core').Constants.HeaderConstants;

/**
* Creates a new DirectoryResult object.
* @class
* The DirectoryResult class is used to store the directory information.
*
* @property {string} name            The directory name.
* @property {object} metadata        The metadata key/value pair.
* @property {string} etag            The etag.
* @property {string} lastModified    The date/time that the directory was last modified.
* @property {string} requestId       The request id.
* @property {string} serverEncrypted If the directory metadata is completely encrypted using the specified algorithm. true/false.
*
* @constructor
* @param {string} [name] The directory name.
*/
function DirectoryResult(name) {
  this.name = name;
}

/**
* Builds a DirectoryResult from a parsed directory listing entry.
*/
DirectoryResult.parse = function (dirXml) {
  return new DirectoryResult(dirXml.Name);
};

/**
* Populates this result from the service response headers, without
* overwriting any property that is already set.
*/
DirectoryResult.prototype.getPropertiesFromHeaders = function (headers) {
  var result = this;

  var copyHeader = function (propertyName, headerName) {
    var headerValue = headers[headerName.toLowerCase()];
    if (!result[propertyName] && headerValue) {
      result[propertyName] = headerValue;
    }
  };

  copyHeader('etag', HeaderConstants.ETAG);
  copyHeader('lastModified', HeaderConstants.LAST_MODIFIED);
  copyHeader('requestId', HeaderConstants.REQUEST_ID);
  copyHeader('serverEncrypted', HeaderConstants.SERVER_ENCRYPTED);
};

module.exports = DirectoryResult;
var _ = require('underscore');

var azureCommon = require('./../../../common/common.core');
var azureutil = azureCommon.util;
var Constants = azureCommon.Constants;
var HeaderConstants = Constants.HeaderConstants;

/**
* Creates a new FileResult object.
* @class
* The FileResult class is used to store the file information.
*
* @property {string} share           The share name.
* @property {string} directory       The directory name.
* @property {string} name            The file name.
* @property {object} metadata        The metadata key/value pair.
* @property {string} etag            The etag.
* @property {string} lastModified    The date/time that the file was last modified.
* @property {string} requestId       The request id.
* @property {string} acceptRanges    The accept ranges.
* @property {string} serverEncrypted If the file data and application metadata are completely encrypted using the specified algorithm. true/false.
* @property {string} contentRange    The content range.
* @property {string} contentLength   The size of the file in bytes.
* @property {object} contentSettings The content settings (contentType, contentEncoding, contentLanguage, cacheControl, contentDisposition, contentMD5).
* @property {object} copy            The copy information (id, status, completionTime, statusDescription, progress, source).
*
* @constructor
* @param {string} [share]     The share name.
* @param {string} [directory] The directory name.
* @param {string} [name]      The file name.
*/
function FileResult(share, directory, name) {
  this.share = share;
  this.directory = directory;
  this.name = name;
}

/**
* Builds a FileResult from a parsed file listing entry.
*/
FileResult.parse = function (entryXml) {
  var listResult = new FileResult();
  for (var propertyName in entryXml) {
    // FIX: guard against inherited enumerable keys, consistent with ShareResult.parse.
    if (entryXml.hasOwnProperty(propertyName)) {
      if (propertyName === 'Properties') {
        // Lift out the properties onto the main object to keep consistent across all APIs like: getFileProperties
        azureutil.setPropertyValueFromXML(listResult, entryXml[propertyName], true);
      } else {
        listResult[propertyName.toLowerCase()] = entryXml[propertyName];
      }
    }
  }

  return listResult;
};

// Maps result property paths to HeaderConstants keys for content responses.
var responseHeaders = {
  'acceptRanges': 'ACCEPT_RANGES',
  'contentLength': 'CONTENT_LENGTH',
  'contentRange': 'CONTENT_RANGE',

  'contentSettings.contentType': 'CONTENT_TYPE',
  'contentSettings.contentEncoding': 'CONTENT_ENCODING',
  'contentSettings.contentLanguage': 'CONTENT_LANGUAGE',
  'contentSettings.cacheControl': 'CACHE_CONTROL',
  'contentSettings.contentDisposition': 'CONTENT_DISPOSITION',
  'contentSettings.contentMD5': 'CONTENT_MD5',
  'contentSettings.fileContentMD5': 'FILE_CONTENT_MD5',

  'copy.id': 'COPY_ID',
  'copy.status': 'COPY_STATUS',
  'copy.source': 'COPY_SOURCE',
  'copy.progress': 'COPY_PROGRESS',
  'copy.completionTime': 'COPY_COMPLETION_TIME',
  'copy.statusDescription': 'COPY_STATUS_DESCRIPTION'
};

/**
* Populates this result from the service response headers, without
* overwriting any (possibly nested) property that is already set.
*
* @param {object} headers  The response headers (lower-cased keys).
* @param {bool}   content  When truthy, also lift the content-related headers listed in responseHeaders.
*/
FileResult.prototype.getPropertiesFromHeaders = function (headers, content) {
  var self = this;

  var setFilePropertyFromHeaders = function (fileProperty, headerProperty) {
    if (!azureutil.tryGetValueChain(self, fileProperty.split('.'), null) && headers[headerProperty.toLowerCase()]) {
      azureutil.setObjectInnerPropertyValue(self, fileProperty.split('.'), headers[headerProperty.toLowerCase()]);

      if (fileProperty === 'copy.progress') {
        var info = azureutil.parseCopyProgress(self.copy.progress);
        // FIX: always parse in base 10; parseInt without a radix is error-prone.
        self.copy.bytesCopied = parseInt(info.bytesCopied, 10);
        self.copy.totalBytes = parseInt(info.totalBytes, 10);
      }
    }
  };

  // For range get, 'x-ms-content-md5' indicates the overall MD5 of the file. Try to set the contentMD5 using this header if it presents.
  setFilePropertyFromHeaders('contentSettings.contentMD5', HeaderConstants.FILE_CONTENT_MD5);

  setFilePropertyFromHeaders('etag', HeaderConstants.ETAG);
  setFilePropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED);
  setFilePropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID);
  setFilePropertyFromHeaders('serverEncrypted', HeaderConstants.SERVER_ENCRYPTED);

  if (content) {
    // Native iteration replaces the underscore chain; same property/header pairs, same order.
    Object.keys(responseHeaders).forEach(function (property) {
      setFilePropertyFromHeaders(property, HeaderConstants[responseHeaders[property]]);
    });
  }
};

/**
* This method sets the HTTP headers and is used by all methods except setFileProperties and createFile.
* Those methods will set the x-ms-* headers using setProperties.
*/
FileResult.setHeaders = function (webResource, options) {
  var setHeaderProperty = function (headerProperty, fileProperty) {
    var propertyValue = azureutil.tryGetValueChain(options, fileProperty.split('.'), null);
    if (propertyValue) {
      webResource.withHeader(headerProperty, propertyValue);
    }
  };

  if (options) {
    // Content-MD5
    setHeaderProperty(HeaderConstants.CONTENT_MD5, 'transactionalContentMD5');

    // Content-Length
    setHeaderProperty(HeaderConstants.CONTENT_LENGTH, 'contentLength');

    // Range
    if (!azureutil.objectIsNull(options.rangeStart)) {
      var range = 'bytes=' + options.rangeStart + '-';

      if (!azureutil.objectIsNull(options.rangeEnd)) {
        range += options.rangeEnd;
      }

      webResource.withHeader(HeaderConstants.STORAGE_RANGE, range);
    }
  }
};
+* All other methods will set the regular HTTP headers using setHeaders. +*/ +FileResult.setProperties = function (webResource, options) { + var setHeaderProperty = function (headerProperty, fileProperty) { + var propertyValue = azureutil.tryGetValueChain(options, fileProperty.split('.'), null); + if (propertyValue) { + webResource.withHeader(headerProperty, propertyValue); + } + }; + + if (options) { + // Content-Length + setHeaderProperty(HeaderConstants.FILE_CONTENT_LENGTH, 'contentLength'); + + // Content-Type + setHeaderProperty(HeaderConstants.FILE_CONTENT_TYPE, 'contentSettings.contentType'); + + // Content-Encoding + setHeaderProperty(HeaderConstants.FILE_CONTENT_ENCODING, 'contentSettings.contentEncoding'); + + // Content-Language + setHeaderProperty(HeaderConstants.FILE_CONTENT_LANGUAGE, 'contentSettings.contentLanguage'); + + // Content-Disposition + setHeaderProperty(HeaderConstants.FILE_CONTENT_DISPOSITION, 'contentSettings.contentDisposition'); + + // Cache-Control + setHeaderProperty(HeaderConstants.FILE_CACHE_CONTROL, 'contentSettings.cacheControl'); + + // Content-MD5 + setHeaderProperty(HeaderConstants.FILE_CONTENT_MD5, 'contentSettings.contentMD5'); + + if (options.metadata) { + webResource.addOptionalMetadataHeaders(options.metadata); + } + } +}; + +module.exports = FileResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/file/models/shareresult.js b/src/node_modules/azure-storage/lib/services/file/models/shareresult.js new file mode 100644 index 0000000..823af25 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/file/models/shareresult.js @@ -0,0 +1,84 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = require('./../../../common/common.core'); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new ShareResult object. +* @class +* The ShareResult class is used to store the share information. +* + * @property {string} name The share name. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the share was last modified. + * @property {string} requestId The request id. + * @property {string} quota The share quota. + * +* @constructor +* @param {string} [name] The share name. 
+*/ +function ShareResult(name) { + this.name = name; +} + +ShareResult.parse = function (shareXml, name) { + var shareResult = new ShareResult(name); + for (var propertyName in shareXml) { + if (shareXml.hasOwnProperty(propertyName)) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getShareProperties + azureutil.setPropertyValueFromXML(shareResult, shareXml[propertyName], true); + } else if (propertyName === 'Metadata' || propertyName === 'ShareStats') { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(propertyName); + shareResult[resultPropertyName] = {}; + azureutil.setPropertyValueFromXML(shareResult[resultPropertyName], shareXml[propertyName], propertyName === 'ShareStats'); + } else { + shareResult[propertyName.toLowerCase()] = shareXml[propertyName]; + } + } + } + + return shareResult; +}; + +ShareResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setSharePropertyFromHeaders = function (shareProperty, headerProperty) { + if (!self[shareProperty] && headers[headerProperty.toLowerCase()]) { + self[shareProperty] = headers[headerProperty.toLowerCase()]; + } + }; + + setSharePropertyFromHeaders('etag', HeaderConstants.ETAG); + setSharePropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED); + setSharePropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID); + setSharePropertyFromHeaders('quota', HeaderConstants.SHARE_QUOTA); +}; + +/** +* The share ACL settings. +* @typedef {object} ShareAclResult +* @extends {ShareAclResult} +* @property {Object.} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. 
+*/ + +module.exports = ShareResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/queue/models/queuemessageresult.js b/src/node_modules/azure-storage/lib/services/queue/models/queuemessageresult.js new file mode 100644 index 0000000..94270ed --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/queue/models/queuemessageresult.js @@ -0,0 +1,130 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = require('./../../../common/common.core'); +var azureutil = azureCommon.util; +var xmlbuilder = azureCommon.xmlbuilder; +var Constants = azureCommon.Constants; + +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new QueueMessageResult object. +* @class +* The QueueMessageResult class is used to store the queue message information. +* +* @property {string} queue The queue name. +* @property {string} messageId The message id. +* @property {string} popReceipt The pop receipt. +* @property {string} messageText The message text. +* @property {string} timeNextVisible The time next visible. +* @property {string} insertionTime The insertion time. +* @property {string} expirationTime The expiration time. +* @property {number} dequeueCount The dequeue count. + * +* @constructor +* @param {string} [queue] The queue name. +* @param {string} [messageId] The message id. 
+* @param {string} [popReceipt] The pop receipt. +*/ +function QueueMessageResult(queue, messageId, popReceipt) { + if (queue) { + this.queue = queue; + } + + if (messageId) { + this.messageId = messageId; + } + + if (popReceipt) { + this.popReceipt = popReceipt; + } +} + +/** +* Builds an XML representation for a queue message +* +* @param {string} messageJs The queue message. +* @param {QueueMessageEncoder} The message encoder. +* @return {string} The XML queue message. +*/ +QueueMessageResult.serialize = function (messageJs, encoder) { + var doc = xmlbuilder.create(Constants.QueueConstants.QUEUE_MESSAGE_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + if (messageJs) { + var message; + if (encoder !== null && encoder !== undefined) { + message = encoder.encode(messageJs); + } else { + message = messageJs; + } + + doc.ele(Constants.QueueConstants.MESSAGE_TEXT_ELEMENT) + .txt(message) + .up(); + } else { + doc.ele(Constants.QueueConstants.MESSAGE_TEXT_ELEMENT).up(); + } + + return doc.doc().toString(); +}; + + +/** +* Pase the XML representation of a queue message to a QueueMessageResult object. +* +* @param {Object} messageXml The XML representation of the queue message. +* @param {QueueMessageEncoder} The message encoder. +* @return {QueueMessageResult} The QueueMessageResult object. 
+*/ +QueueMessageResult.parse = function (messageXml, encoder) { + var queueMessageResult = new QueueMessageResult(); + for (var property in messageXml) { + if (property === Constants.QueueConstants.MESSAGE_TEXT_ELEMENT) { + if (encoder !== null && encoder !== undefined) { + queueMessageResult.messageText = encoder.decode(messageXml[property]); + } else { + queueMessageResult.messageText = messageXml[property]; + } + } else { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(property); + queueMessageResult[resultPropertyName] = messageXml[property]; + } + } + + // Convert dequeueCount to number + if (queueMessageResult.dequeueCount) { + queueMessageResult.dequeueCount = parseInt(queueMessageResult.dequeueCount); + } + + return queueMessageResult; +}; + +QueueMessageResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setmessagePropertyFromHeaders = function (messageProperty, headerProperty) { + if (!self[messageProperty] && headers[headerProperty.toLowerCase()]) { + self[messageProperty] = headers[headerProperty.toLowerCase()]; + } + }; + + setmessagePropertyFromHeaders('popReceipt', HeaderConstants.POP_RECEIPT); + setmessagePropertyFromHeaders('timeNextVisible', HeaderConstants.TIME_NEXT_VISIBLE); +}; + +module.exports = QueueMessageResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/queue/models/queueresult.js b/src/node_modules/azure-storage/lib/services/queue/models/queueresult.js new file mode 100644 index 0000000..bf14244 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/queue/models/queueresult.js @@ -0,0 +1,72 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Module dependencies.
+var Constants = require('./../../../common/common.core').Constants;
+var HeaderConstants = Constants.HeaderConstants;
+
+/**
+* Creates a new QueueResult object.
+* @class
+* The QueueResult class is used to store the queue information.
+*
+* @property {string} name The queue name.
+* @property {object} metadata The metadata key/value pair.
+* @property {number} approximateMessageCount The approximate number of messages in the queue. This number is not lower than the actual number of messages in the queue, but could be higher.
+* @property {Object.<string, AccessPolicy>} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information.
+ *
+* @constructor
+* @param {string} [name] The queue name.
+* @param {string} [metadata] The metadata key/value pair.
+*/ +function QueueResult(name, metadata) { + if (name) { + this.name = name; + } + + if (metadata) { + this.metadata = metadata; + } +} + +QueueResult.parse = function (messageXml) { + var queueResult = new QueueResult(); + for (var property in messageXml) { + if (messageXml.hasOwnProperty(property)) { + queueResult[property.toLowerCase()] = messageXml[property]; + } + } + + return queueResult; +}; + +QueueResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setPropertyFromHeaders = function (queueProperty, headerProperty, typeConverterFunc) { + if (!self[queueProperty] && headers[headerProperty.toLowerCase()]) { + if(typeConverterFunc) { + self[queueProperty] = typeConverterFunc(headers[headerProperty.toLowerCase()]); + } else{ + self[queueProperty] = headers[headerProperty.toLowerCase()]; + } + } + }; + + setPropertyFromHeaders('approximateMessageCount', HeaderConstants.APPROXIMATE_MESSAGES_COUNT, parseInt); +}; + +module.exports = QueueResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/queue/queuemessageencoder.js b/src/node_modules/azure-storage/lib/services/queue/queuemessageencoder.js new file mode 100644 index 0000000..a3d22bf --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/queue/queuemessageencoder.js @@ -0,0 +1,182 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Module dependencies. + +var util = require('util'); + +/** + * The interface for classes that represent a encoder which can be used to specify how the queue service encodes and decodes queue messages. + * + * To specify how the queue service encodes and decodes queue messages, set `queueService.messageEncoder` to object of built-in encoder types + * `[TextBase64QueueMessageEncoder]{@link TextBase64QueueMessageEncoder}`, `[BinaryBase64QueueMessageEncoder]{@link BinaryBase64QueueMessageEncoder}`, `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`, + * or custom implementation of the QueueMessageEncoder. + * + * @class + */ +function QueueMessageEncoder() { +} + +/** + * Function to encode queue messages. + * + * @param {object} [input] The target to be encoded. + * @return {string} + */ +QueueMessageEncoder.prototype.encode = function(input){ + return input; +}; + +/** + * Function to decode queue messages + * + * @param {string} [textToBeDecoded] The base64 string to be decoded. + * @returns {any} + */ +QueueMessageEncoder.prototype.decode = function(textToBeDecoded){ + return textToBeDecoded; +}; + + +/** + * Create a new TextBase64QueueMessageEncoder object + * @class + * + * Encode from utf-8 string to base64 string + * Decode from base64 string to utf-8 string. + * + * @constructor + * @extends {QueueMessageEncoder} + */ +function TextBase64QueueMessageEncoder(){ +} +util.inherits(TextBase64QueueMessageEncoder, QueueMessageEncoder); + +/** + * Encode from utf-8 string to base64 string + * @this TextBase64QueueMessageEncoder + * + * @param {string} [input] The target to be encoded. + * + * @return {string} + */ +TextBase64QueueMessageEncoder.prototype.encode = function(input){ + return Buffer.from(input, 'utf8').toString('base64'); +}; + +/** + * Decode from base64 string to utf-8 string. + * @this TextBase64QueueMessageEncoder + * + * @param {string} [textToBeDecoded] The base64 string to be decoded. 
+ *
+ * @return {string}
+ */
+TextBase64QueueMessageEncoder.prototype.decode = function(textToDecode){
+  return Buffer.from(textToDecode, 'base64').toString('utf8');
+};
+
+
+/**
+ * Create a new BinaryBase64QueueMessageEncoder object
+ * @class
+ *
+ * Encode from binary buffer to base64 string
+ * Decode from base64 string to binary buffer.
+ *
+ * @constructor
+ * @extends {QueueMessageEncoder}
+ */
+function BinaryBase64QueueMessageEncoder(){
+}
+util.inherits(BinaryBase64QueueMessageEncoder, QueueMessageEncoder);
+
+/**
+ * Encode from binary buffer string to base64 string
+ * @this BinaryBase64QueueMessageEncoder
+ *
+ * @param {Buffer} [input] The target to be encoded.
+ *
+ * @return {string}
+ */
+BinaryBase64QueueMessageEncoder.prototype.encode = function(input){
+  return input.toString('base64');
+};
+
+
+/**
+ * Decode from base64 string to binary buffer.
+ * @this BinaryBase64QueueMessageEncoder
+ *
+ * @param {string} [textToBeDecoded] The base64 string to be decoded.
+ *
+ * @return {Buffer}
+ */
+BinaryBase64QueueMessageEncoder.prototype.decode = function(textToDecode){
+  return Buffer.from(textToDecode, 'base64');
+};
+
+
+/**
+ * Create a new TextXmlQueueMessageEncoder object
+ * @class
+ *
+ * Encode utf-8 string by escaping the xml markup characters.
+ * Decode from utf-8 string by unescaping the xml markup characters.
+ *
+ * @constructor
+ * @extends {QueueMessageEncoder}
+ */
+function TextXmlQueueMessageEncoder(){
+}
+util.inherits(TextXmlQueueMessageEncoder, QueueMessageEncoder);
+
+/**
+ * Encode utf-8 string by escaping the xml markup characters.
+ * @this TextXmlQueueMessageEncoder
+ *
+ * @param {string} [input] The target to be encoded.
+ *
+ * @return {string}
+ */
+TextXmlQueueMessageEncoder.prototype.encode = function(input){
+  return input.replace(/&/gm, '&amp;')
+    .replace(/</gm, '&lt;')
+    .replace(/>/gm, '&gt;')
+    .replace(/"/gm, '&quot;')
+    .replace(/'/gm, '&apos;');
+};
+
+/**
+ * Decode from utf-8 string by unescaping the xml markup characters.
+ * @this TextXmlQueueMessageEncoder
+ *
+ * @param {string} [textToBeDecoded] The base64 string to be decoded.
+ *
+ * @return {string}
+ */
+TextXmlQueueMessageEncoder.prototype.decode = function(textToDecode){
+  return textToDecode.replace(/&amp;/gm, '&')
+    .replace(/&lt;/gm, '<')
+    .replace(/&gt;/gm, '>')
+    .replace(/&quot;/gm, '"')
+    .replace(/&apos;/gm, '\'');
+};
+
+module.exports = QueueMessageEncoder;
+module.exports.TextBase64QueueMessageEncoder = TextBase64QueueMessageEncoder;
+module.exports.BinaryBase64QueueMessageEncoder = BinaryBase64QueueMessageEncoder;
+module.exports.TextXmlQueueMessageEncoder = TextXmlQueueMessageEncoder;
\ No newline at end of file
diff --git a/src/node_modules/azure-storage/lib/services/queue/queueservice.js b/src/node_modules/azure-storage/lib/services/queue/queueservice.js
new file mode 100644
index 0000000..a3b05e8
--- /dev/null
+++ b/src/node_modules/azure-storage/lib/services/queue/queueservice.js
@@ -0,0 +1,1472 @@
+//
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Module dependencies.
+var util = require('util'); +var _ = require('underscore'); +var extend = require('extend'); + +var azureCommon = require('./../../common/common.core'); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var validate = azureCommon.validate; + +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; +var Constants = azureCommon.Constants; +var QueryStringConstants = Constants.QueryStringConstants; +var HeaderConstants = Constants.HeaderConstants; +var RequestLocationMode = Constants.RequestLocationMode; + +// Models requires +var QueueResult = require('./models/queueresult'); +var AclResult = azureCommon.AclResult; +var QueueMessageResult = require('./models/queuemessageresult'); +var QueueMessageEncoder = require('./queuemessageencoder'); +var ServiceStatsParser = azureCommon.ServiceStatsParser; + +/** +* Creates a new QueueService object. +* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The QueueService class is used to perform operations on the Microsoft Azure Queue Service. +* +* For more information on using the Queue Service, as well as task focused information on using it from a Node.js application, see +* [How to Use the Queue Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-queues/). +* The following defaults can be set on the Queue service. +* messageEncoder The message encoder to specify how QueueService encodes and decodes the queue message. Default is `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Queue service. 
+* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Queue service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Queue service. +* defaultLocationMode The default location mode for requests made via the Queue service. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Queue service; true to use the +* Nagle algorithm; otherwise, false. The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). +* @constructor +* @augments {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sas] The Shared Access Signature string. +* @param {string} [endpointSuffix] The endpoint suffix. +* @param {TokenCredential} [token] The {@link TokenCredential} object. 
+*/ +function QueueService(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token); + + QueueService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._queueEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken, + token); + + if (this.anonymous) { + throw new Error(SR.ANONYMOUS_ACCESS_BLOBSERVICE_ONLY); + } + + /** + * @property {boolean} QueueService#messageEncoder + * @defaultvalue {QueueMessageEncoder} `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. + * The message encoder to specify how QueueService encodes and decodes the queue message. Default is `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. + */ + this.messageEncoder = new QueueMessageEncoder.TextXmlQueueMessageEncoder(); +} + +util.inherits(QueueService, StorageServiceClient); + +/** +* Gets the service stats for a storage account’s Queue service. +* +* @this {QueueService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise, `[result]{@link ServiceStats}` +* will contain the stats and `response` +* will contain information related to this operation. +*/ +QueueService.prototype.getServiceStats = function (optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getServiceStats', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'stats') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.serviceStatsResult = null; + if (!responseObject.error) { + responseObject.serviceStatsResult = ServiceStatsParser.parse(responseObject.response.body.StorageServiceStats); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.serviceStatsResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. 
+* +* @this {QueueService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise, `[result]{@link ServiceProperties}` +* will contain the properties and `response` +* will contain information related to this operation. +*/ +QueueService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Sets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {QueueService} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. +*/ +QueueService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of queue items whose names begin with the specified prefix under the given account. +* +* @this {QueueService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {int} [options.maxResults] Specifies the maximum number of queues to return per call to Azure storage. 
This does NOT affect list size returned by this function. (maximum: 5000) +* @param {string} [options.include] Include this parameter to specify that the queue's metadata be returned as part of the response body. (allowed values: '', 'metadata') +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[queues]{@link QueueResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.listQueuesSegmented = function (currentToken, optionsOrCallback, callback) { + this.listQueuesSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of queue items under the given account. +* +* @this {QueueService} +* @param {string} prefix The prefix of the queue name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.* @param {string} [options.prefix] Filters the results to return only queues whose name begins with the specified prefix. +* @param {object} [options] The request options. +* @param {string} [options.marker] String value that identifies the portion of the list to be returned with the next list operation. +* @param {int} [options.maxResults] Specifies the maximum number of queues to return per call to Azure storage. This does NOT affect list size returned by this function. (maximum: 5000) +* @param {string} [options.include] Include this parameter to specify that the queue's metadata be returned as part of the response body. (allowed values: '', 'metadata') +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[queues]{@link QueueResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.listQueuesSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listQueuesSegmentedWithPrefix', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(); + webResource.withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOption(QueryStringConstants.INCLUDE, options.include) + .withQueryOption(QueryStringConstants.PREFIX, prefix); + + if(!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listQueuesResult = null; + + if (!responseObject.error) { + responseObject.listQueuesResult = { + entries: null, + continuationToken: null + }; + responseObject.listQueuesResult.entries = []; + var queues = []; + + if (responseObject.response.body.EnumerationResults.Queues && responseObject.response.body.EnumerationResults.Queues.Queue) { + queues = responseObject.response.body.EnumerationResults.Queues.Queue; + + if (!_.isArray(queues)) { + queues = [ queues ]; + } + + queues.forEach(function (currentQueue) { + var queueResult = QueueResult.parse(currentQueue); + responseObject.listQueuesResult.entries.push(queueResult); + }); + + if(responseObject.response.body.EnumerationResults.NextMarker) { + responseObject.listQueuesResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listQueuesResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + 
responseObject.listQueuesResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listQueuesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks to see if a queue exists. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise, `[result]{@link QueueResult}` will contain +* the queue information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.doesQueueExist = function (queue, optionsOrCallback, callback) { + this._doesQueueExist(queue, false, optionsOrCallback, callback); +}; + +/** +* Creates a new queue under the given account. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.createQueue = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createQueue', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(queue); + if (options) { + webResource.addOptionalMetadataHeaders(options.metadata); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue); + if (options && options.metadata) { + responseObject.queueResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a new queue under the given account if it doesn't exist. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information including `created` boolean member and +* `response` will contain information related to this operation. +* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* queueService.createQueueIfNotExists('taskqueue', function(error) { +* if(!error) { +* // Queue created or exists +* } +* }); +*/ +QueueService.prototype.createQueueIfNotExists = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createQueueIfNotExists', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesQueueExist(queue, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if (error) { + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createQueue(queue, options, function(createError, responseQueue, createResponse) { + if (!createError) { + responseQueue.created = true; + } + else if (createError && createError.statusCode === 
Constants.HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.QueueErrorCodeStrings.QUEUE_ALREADY_EXISTS) { + createError = null; + responseQueue.created = false; + createResponse.isSuccessful = true; + } + + callback(createError, responseQueue, createResponse); + }); + } + }); +}; + +/** +* Permanently deletes the specified queue. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.deleteQueue = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteQueue', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.del(queue); + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Permanently deletes the specified queue if it exists. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* 'true' if the queue was deleted and 'false' if the queue did not exist. +* `response` will contain information related to this operation. +*/ +QueueService.prototype.deleteQueueIfExists = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteQueueIfExists', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesQueueExist(queue, true, options, function existsCallback(error, existsResult, response) { + if (error) { + callback(error, existsResult.exists, response); + } else if (!existsResult.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteQueue(queue, options, function(deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.QueueErrorCodeStrings.QUEUE_NOT_FOUND) { + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Returns queue properties, including user-defined metadata. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {QueueService} +* @param {string} queue The queue name. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.getQueueMetadata = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getQueueMetadata', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(queue) + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue); + responseObject.queueResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.setQueueMetadata = function (queue, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setQueueMetadata', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(queue) + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue, metadata); + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Adds a new message to the back of the message queue. +* The encoded message can be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size for previous versions. +* Unencoded messages must be in a format that can be included in an XML request with UTF-8 encoding. +* Queue messages are encoded using the `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. See queueService.messageEncoder to set encoder defaults. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {string|Buffer} messageText The message text. +* @param {object} [options] The request options. +* @param {int} [options.messageTimeToLive] The time-to-live interval for the message, in seconds. The maximum time-to-live allowed is 7 days. 
If this parameter is omitted, the default time-to-live is 7 days +* @param {int} [options.visibilityTimeout] Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value smaller than the time-to-live value. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain +* the message. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* queueService.createMessage('taskqueue', 'Hello world!', function(error) { +* if(!error) { +* // Message inserted +* } +* }); +*/ +QueueService.prototype.createMessage = function (queue, messageText, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var xmlMessageDescriptor = QueueMessageResult.serialize(messageText, this.messageEncoder); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.post(queue + '/messages') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/atom+xml;charset="utf-8"') + .withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(xmlMessageDescriptor, 'utf8')) + .withQueryOption(QueryStringConstants.MESSAGE_TTL, options.messageTimeToLive) + .withQueryOption(QueryStringConstants.VISIBILITY_TIMEOUT, options.visibilityTimeout) + .withBody(xmlMessageDescriptor); + + var messageEncoder = this.messageEncoder; + + var processResponseCallback = function (responseObject, next) { + responseObject.queueMessageResults = []; + + if (responseObject.response && responseObject.response.body && responseObject.response.body.QueueMessagesList && responseObject.response.body.QueueMessagesList.QueueMessage) { + var messages = responseObject.response.body.QueueMessagesList.QueueMessage; + + if (!_.isArray(messages)) { + messages = [ messages ]; + } + + messages.forEach(function (message) { + var queueMessageResult = QueueMessageResult.parse(message, messageEncoder); + responseObject.queueMessageResults.push(queueMessageResult); + }); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, responseObject.queueMessageResults[0], 
returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieve messages from the queue and makes them invisible to other consumers. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {int} [options.numOfMessages] A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. By default, a single message is retrieved from the queue with this operation. +* @param {int} [options.visibilityTimeout] Required if not peek only. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* a list of `[messages]{@link QueueMessageResult}`. +* `response` will contain information related to this operation. +* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* var queueName = 'taskqueue'; +* queueService.getMessages(queueName, function(error, serverMessages) { +* if(!error) { +* // Process the message in less than 30 seconds, the message +* // text is available in serverMessages[0].messagetext +* queueService.deleteMessage(queueName, serverMessages[0].messageId, serverMessages[0].popReceipt, function(error) { +* if(!error){ +* // Message deleted +* } +* }); +* } +* }); +*/ +QueueService.prototype.getMessages = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getMessages', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.peekOnly; + + this._getOrPeekMessages(queue, options, callback); +}; + +/** +* Retrieves a message from the queue and makes it invisible to other consumers. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {int} [options.visibilityTimeout] Required if not peek only. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain +* the message. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* var queueName = 'taskqueue'; +* queueService.getMessage(queueName, function(error, serverMessage) { +* if(!error) { +* // Process the message in less than 30 seconds, the message +* // text is available in serverMessage.messagetext +* queueService.deleteMessage(queueName, serverMessage.messageId, serverMessage.popReceipt, function(error) { +* if(!error){ +* // Message deleted +* } +* }); +* } +* }); +*/ +QueueService.prototype.getMessage = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.numOfMessages = 1; + + var finalCallback = function(error, messages, response){ + var message; + if(messages && messages.length > 0){ + message = messages[0]; + } + + callback(error, message, response); + }; + + this.getMessages(queue, options, finalCallback); +}; + +/** +* Retrieves messages from the front of the queue, without changing the messages visibility. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {int} [options.numOfMessages] A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. By default, a single message is retrieved from the queue with this operation. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
/**
* Retrieves messages from the front of the queue WITHOUT changing their
* visibility — other consumers can still see and dequeue them.
*
* @this {QueueService}
* @param {string}        queue                    The queue name.
* @param {object}        [optionsOrCallback]      Request options (`numOfMessages` up to 32; plus the usual
*                                                 locationMode/timeout/clientRequestId options), or the callback.
* @param {errorOrResult} callback                 `error` on failure; otherwise `result` is an array of
*                                                 `[messages]{@link QueueMessageResult}` and `response`
*                                                 carries the operation details.
*/
QueueService.prototype.peekMessages = function (queue, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('peekMessages', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var peekOptions = extend(true, {}, userOptions);
  peekOptions.peekOnly = true;
  // A visibility timeout is meaningless when only peeking; drop it if supplied.
  delete peekOptions.visibilityTimeout;

  this._getOrPeekMessages(queue, peekOptions, callback);
};
/**
* Retrieves a single message from the front of the queue without altering its
* visibility.
*
* @this {QueueService}
* @param {string}        queue               The queue name.
* @param {object}        [optionsOrCallback] Request options (locationMode/timeout/clientRequestId
*                                            options), or the callback.
* @param {errorOrResult} callback            `error` on failure; otherwise `[result]{@link QueueMessageResult}`
*                                            holds the message and `response` the operation details.
*/
QueueService.prototype.peekMessage = function (queue, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('peekMessage', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var singleOptions = extend(true, {}, userOptions);
  singleOptions.numOfMessages = 1;

  // Delegate to peekMessages and unwrap the single-element result.
  this.peekMessages(queue, singleOptions, function (error, messages, response) {
    var first = (messages && messages.length > 0) ? messages[0] : undefined;
    callback(error, first, response);
  });
};
/**
* Deletes a specified message from the queue.
*
* @this {QueueService}
* @param {string}          queue               The queue name.
* @param {string}          messageId           The identifier of the message to delete.
* @param {string}          popReceipt          A valid pop receipt from an earlier Get Messages or
*                                              Update Message operation.
* @param {object}          [optionsOrCallback] Request options (locationMode/timeout/clientRequestId
*                                              options), or the callback.
* @param {errorOrResponse} callback            `error` on failure; `response` carries the operation details.
*/
QueueService.prototype.deleteMessage = function (queue, messageId, popReceipt, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('deleteMessage', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  // Keep the original check order: a missing pop receipt is reported first.
  if (azureutil.objectIsNull(popReceipt)) {
    throw new Error(SR.INVALID_POP_RECEIPT);
  }
  if (azureutil.objectIsNull(messageId)) {
    throw new Error(SR.INVALID_MESSAGE_ID);
  }

  var options = extend(true, {}, userOptions);
  var webResource = WebResource.del(queue + '/messages/' + messageId)
    .withQueryOption(QueryStringConstants.POP_RECEIPT, popReceipt, null, true);

  this.performRequest(webResource, null, options, function (responseObject, next) {
    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.response);
    });
  });
};
/**
* Deletes every message currently in the queue.
*
* @this {QueueService}
* @param {string}          queue               The queue name.
* @param {object}          [optionsOrCallback] Request options (locationMode/timeout/clientRequestId
*                                              options), or the callback.
* @param {errorOrResponse} callback            `error` on failure; otherwise `response` carries the
*                                              operation details.
*/
QueueService.prototype.clearMessages = function (queue, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('clearMessages', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);
  // DELETE on the messages collection clears the whole queue.
  var webResource = WebResource.del(queue + '/messages');

  this.performRequest(webResource, null, options, function (responseObject, next) {
    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.response);
    });
  });
};
/**
* Updates a message's visibility timeout and, optionally, its contents.
* A message must be representable in a UTF-8 XML request and the encoded
* message can be at most 64KB.
*
* @this {QueueService}
* @param {string}        queue               The queue name.
* @param {string}        messageId           The identifier of the message to update.
* @param {string}        popReceipt          A valid pop receipt from an earlier Get Messages or
*                                            Update Message operation.
* @param {int}           visibilityTimeout   New visibility timeout in seconds, relative to server time.
*                                            Must be >= 0 and at most 7 days; it may be later than the
*                                            message's expiry time.
* @param {object}        [optionsOrCallback] Request options — `messageText` supplies replacement text —
*                                            or the callback.
* @param {errorOrResult} callback            `error` on failure; otherwise `[result]{@link QueueMessageResult}`
*                                            holds the updated message info and `response` the operation details.
*/
QueueService.prototype.updateMessage = function (queue, messageId, popReceipt, visibilityTimeout, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('updateMessage', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  if (azureutil.objectIsNull(popReceipt)) {
    throw new Error(SR.INVALID_POP_RECEIPT);
  }
  if (azureutil.objectIsNull(messageId)) {
    throw new Error(SR.INVALID_MESSAGE_ID);
  }

  var options = extend(true, {}, userOptions);

  // Serialize the replacement text only when the caller supplied one.
  var payload = options.messageText ? QueueMessageResult.serialize(options.messageText, this.messageEncoder) : null;
  var payloadLength = payload ? Buffer.byteLength(payload, 'utf8') : 0;

  var webResource = WebResource.put(queue + '/messages/' + messageId)
    .withHeader(HeaderConstants.CONTENT_TYPE, 'application/atom+xml;charset="utf-8"')
    .withHeader(HeaderConstants.CONTENT_LENGTH, payloadLength)
    .withQueryOption(QueryStringConstants.POP_RECEIPT, popReceipt, null, true)
    .withQueryOption(QueryStringConstants.VISIBILITY_TIMEOUT, visibilityTimeout)
    .withBody(payload);

  var processResponseCallback = function (responseObject, next) {
    responseObject.queueMessageResult = null;
    if (!responseObject.error) {
      // The service responds with headers only (e.g. the fresh pop receipt);
      // surface them on a result object.
      responseObject.queueMessageResult = new QueueMessageResult(queue, messageId);
      responseObject.queueMessageResult.getPropertiesFromHeaders(responseObject.response.headers);
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.queueMessageResult, returnObject.response);
    });
  };

  this.performRequest(webResource, webResource.body, options, processResponseCallback);
};
/**
* Reads the queue's ACL (its stored shared access policies).
*
* @this {QueueService}
* @param {string}        queue               The queue name.
* @param {object}        [optionsOrCallback] Request options (locationMode/timeout/clientRequestId
*                                            options), or the callback.
* @param {errorOrResult} callback            `error` on failure; otherwise `[result]{@link QueueResult}`
*                                            holds the queue info (including `signedIdentifiers`) and
*                                            `response` the operation details.
*/
QueueService.prototype.getQueueAcl = function (queue, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('getQueueAcl', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);
  // ACL reads may be served from the secondary location.
  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;

  var webResource = WebResource.get(queue)
    .withQueryOption(QueryStringConstants.COMP, 'acl');

  var processResponseCallback = function (responseObject, next) {
    responseObject.queueResult = null;
    if (!responseObject.error) {
      responseObject.queueResult = new QueueResult(queue);
      responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers);
      responseObject.queueResult.signedIdentifiers = AclResult.parse(responseObject.response.body);
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.queueResult, returnObject.response);
    });
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};
/**
* Updates the queue's ACL (its stored shared access policies).
*
* @this {QueueService}
* @param {string} queue The queue name.
* @param {Object.<string, AccessPolicy>} signedIdentifiers The ACL settings, keyed by signed identifier.
*                                            Must be a plain object map — passing an array throws
*                                            `TypeError(SR.INVALID_SIGNED_IDENTIFIERS)`.
*                                            See `[AccessPolicy]{@link AccessPolicy}` for details.
* @param {object}        [optionsOrCallback] Request options (locationMode/timeout/clientRequestId
*                                            options), or the callback.
* @param {errorOrResult} callback            `error` on failure; otherwise `[result]{@link QueueResult}`
*                                            holds the queue info and `response` the operation details.
* @example
* var azure = require('azure-storage');
* var SharedAccessPermissions = azure.QueueUtilities.SharedAccessPermissions;
* var queueService = azure.createQueueService();
* // Policies are keyed by their signed identifier (NOT an array):
* var sharedAccessPolicies = {
*   processOnly: {
*     Permissions: SharedAccessPermissions.PROCESS,
*     Start: startDate,
*     Expiry: expiryDate
*   },
*   processAndDelete: {
*     Permissions: SharedAccessPermissions.PROCESS + SharedAccessPermissions.DELETE,
*     Start: startDate,
*     Expiry: expiryDate
*   }
* };
*
* queueService.setQueueAcl(queueName, sharedAccessPolicies, function(error, queueResult, response) {
*   // do whatever
* });
*/
QueueService.prototype.setQueueAcl = function (queue, signedIdentifiers, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('setQueueAcl', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);

  var policies = null;
  if (signedIdentifiers) {
    // The API takes a map keyed by signed identifier; the legacy array form is rejected.
    if (_.isArray(signedIdentifiers)) {
      throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS);
    }
    policies = AclResult.serialize(signedIdentifiers);
  }

  var webResource = WebResource.put(queue)
    .withQueryOption(QueryStringConstants.COMP, 'acl')
    .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0)
    .withBody(policies);

  var processResponseCallback = function (responseObject, next) {
    // FIX: was `responseObject.containerResult = null;` — a copy/paste leftover
    // from the blob container service; the field read below is `queueResult`.
    responseObject.queueResult = null;
    if (!responseObject.error) {
      responseObject.queueResult = new QueueResult(queue);
      responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers);
      if (signedIdentifiers) {
        responseObject.queueResult.signedIdentifiers = signedIdentifiers;
      }
    }

    var finalCallback = function (returnObject) {
      callback(returnObject.error, returnObject.queueResult, returnObject.response);
    };

    next(responseObject, finalCallback);
  };

  this.performRequest(webResource, webResource.body, options, processResponseCallback);
};
/**
* Generates a shared access signature (SAS) token for the queue.
*
* @this {QueueService}
* @param {string}      queue                                      The queue name.
* @param {object}      sharedAccessPolicy                         The shared access policy.
* @param {string}      [sharedAccessPolicy.Id]                    The signed identifier.
* @param {object}      [sharedAccessPolicy.AccessPolicy.Permissions]      The permission type.
* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start]            When the SAS becomes valid (UTC).
* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry]           When the SAS expires (UTC).
* @param {string}      [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or inclusive range
*                                                                         from which to accept requests.
* @param {string}      sharedAccessPolicy.AccessPolicy.Protocols          Permitted protocols: 'https,http'
*                                                                         (default) or 'https'.
* @return {string} The SAS query string, without the leading "?".
*/
QueueService.prototype.generateSharedAccessSignature = function (queue, sharedAccessPolicy) {
  // SAS generation needs account-key credentials; SAS/anonymous credentials cannot sign.
  var creds = this.storageCredentials;
  if (!creds || !creds.generateSignedQueryString) {
    throw new Error(SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY);
  }

  validate.validateArgs('generateSharedAccessSignature', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.object(sharedAccessPolicy, 'sharedAccessPolicy');
  });

  return creds.generateSignedQueryString(Constants.ServiceType.Queue, queue, sharedAccessPolicy, null);
};
/**
* Checks whether a queue exists.
* @ignore
*
* @this {QueueService}
* @param {string}  queue               The queue name.
* @param {boolean} primaryOnly         If true, execute only against the primary location.
* @param {object}  [optionsOrCallback] Request options (locationMode/timeout/clientRequestId
*                                      options), or the callback.
* @param {Function(error, result, response)} callback `error` on failure; otherwise `result` carries
*                                      the queue info including an `exists` boolean, and `response`
*                                      the operation details.
*/
QueueService.prototype._doesQueueExist = function (queue, primaryOnly, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('doesQueueExist', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);
  var webResource = WebResource.head(queue)
    .withQueryOption(QueryStringConstants.COMP, 'metadata');

  // Existence probes may hit the secondary unless the caller pinned the primary.
  if (primaryOnly === false) {
    options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;
  }

  var processResponseCallback = function (responseObject, next) {
    var queueResult = new QueueResult(queue);
    queueResult.exists = false;

    if (!responseObject.error) {
      queueResult.exists = true;
      queueResult.getPropertiesFromHeaders(responseObject.response.headers);
    } else if (responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) {
      // A 404 is a successful "does not exist" answer, not a failure.
      responseObject.error = null;
      responseObject.response.isSuccessful = true;
    }
    responseObject.queueResult = queueResult;

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.queueResult, returnObject.response);
    });
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};

/**
* Shared implementation behind getMessages/peekMessages: fetches (or peeks,
* when `options.peekOnly` is set) up to `options.numOfMessages` messages.
* @ignore
*/
QueueService.prototype._getOrPeekMessages = function (queue, optionsOrCallback, callback) {
  var userOptions;
  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });

  validate.validateArgs('_getOrPeekMessages', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
    v.callback(callback);
  });

  var options = extend(true, {}, userOptions);
  if (!options.numOfMessages) {
    options.numOfMessages = 1;
  }

  var webResource = WebResource.get(queue + '/messages')
    .withQueryOption(QueryStringConstants.NUM_OF_MESSAGES, options.numOfMessages)
    .withQueryOption(QueryStringConstants.VISIBILITY_TIMEOUT, options.visibilityTimeout)
    .withQueryOption(QueryStringConstants.PEEK_ONLY, options.peekOnly);

  if (options.peekOnly) {
    // Peeking is read-only, so the secondary endpoint is acceptable.
    options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;
  }

  var messageEncoder = this.messageEncoder;
  var processResponseCallback = function (responseObject, next) {
    responseObject.queueMessageResults = null;

    if (!responseObject.error) {
      responseObject.queueMessageResults = [];
      var body = responseObject.response.body;
      var rawMessages = body.QueueMessagesList && body.QueueMessagesList.QueueMessage;
      if (rawMessages) {
        // The XML parser does not wrap a lone message in an array.
        if (!_.isArray(rawMessages)) {
          rawMessages = [rawMessages];
        }
        rawMessages.forEach(function (msg) {
          responseObject.queueMessageResults.push(QueueMessageResult.parse(msg, messageEncoder));
        });
      }
    }

    next(responseObject, function (returnObject) {
      callback(returnObject.error, returnObject.queueMessageResults, returnObject.response);
    });
  };

  this.performRequest(webResource, null, options, processResponseCallback);
};
/**
* Builds the URL for a queue, optionally appending a SAS token.
*
* @param {string}  queue      The queue name.
* @param {string}  [sasToken] A Shared Access Signature token.
* @param {boolean} [primary]  Whether to use the primary (true) or secondary endpoint.
* @return {string} The formatted URL string.
* @example
* var azure = require('azure-storage');
* var queueService = azure.createQueueService();
* var sasToken = queueService.generateSharedAccessSignature(queue, sharedAccessPolicy);
* var sasUrl = queueService.getUrl(queue, sasToken);
*/
QueueService.prototype.getUrl = function (queue, sasToken, primary) {
  validate.validateArgs('getUrl', function (v) {
    v.string(queue, 'queue');
    v.queueNameIsValid(queue);
  });

  return this._getUrl(queue, sasToken, primary);
};

module.exports = QueueService;

// ============================================================================
// File: lib/services/queue/queueutilities.js
//
// Copyright (c) Microsoft and contributors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// ============================================================================

// Expose 'QueueUtilities'.
exports = module.exports;

/**
* Enums for use with the Queue service.
* @namespace QueueUtilities
*/
var QueueUtilities = {
  /**
   * Shared access permission types.
   *
   * @const
   * @enum {string}
   */
  SharedAccessPermissions: {
    READ: 'r',
    ADD: 'a',
    UPDATE: 'u',
    PROCESS: 'p'
  }
};

module.exports = QueueUtilities;

// ============================================================================
// File: lib/services/table/internal/edmhandler.js
// (Apache License 2.0, Microsoft and contributors — same header as above.)
// ============================================================================

var _ = require('underscore');
var util = require('util');
var guid = require('uuid');

var azureCommon = require('./../../../common/common.core');
var azureutil = azureCommon.util;
var SR = azureCommon.SR;

var TableUtilities = require('../tableutilities');
var EdmType = TableUtilities.EdmType;
+*/ +exports.propertyType = function (value, guessNumberType) { + if (_.isNumber(value)) { + if (guessNumberType) { + if (azureutil.objectIsInt(value)) { + return 'Edm.Int32'; + } else { + return 'Edm.Double'; + } + } else { + return null; + } + } else if (_.isBoolean(value)) { + return 'Edm.Boolean'; + } else if (_.isDate(value)) { + return 'Edm.DateTime'; + } else { + return 'Edm.String'; + } +}; + +/** +* Convert a JSON value from over the wire into the correct EDM type. +* +* Note that Int64, is remaining a string. Converting it to a Number would lose precision. +* Int32, Boolean, and Double should already be the correct non-string types +* +* @param {string} type The type of the value as it appears in the type attribute. +* @param value The value in JSON format. +* @return {object} The unserialized value. +*/ +exports.deserializeValueFromJson = function (type, value) { + if (type) { + switch (type) { + case EdmType.BINARY: + return Buffer.from(value, 'base64'); + case EdmType.DATETIME: + return new Date(value); + case EdmType.GUID: + return value; + case EdmType.DOUBLE: + // Account for Infinity and NaN: + if (typeof value !== 'number') { + return parseFloat(value); + } + return value; + case EdmType.INT32: + case EdmType.INT64: + case EdmType.STRING: + case EdmType.BOOLEAN: + return value; + default: + throw new Error(util.format(SR.TYPE_NOT_SUPPORTED, type)); + } + } else { + return value; + } +}; + +/** +* Convert a raw EdmType value into the JSON value expected to be sent over the wire. +* +* TODO: validate correct input types? +* Expects Edm.Int64 and Edm.String to be string, Edm.Double and Edm.Int32 to be Number, +* Edm.Guid to be an array or buffer compatible with Node.uuid, Edm.Binary to be a Node Buffer, Edm.DateTime to be a Date, +* and Edm.Boolean to be a boolean. +* +* @param {string} type The type of the value as it will appear in the type attribute. +* @param {string} value The value +* @return {object} The serialized value. 
+*/ +exports.serializeValue = function (type, value) { + switch (type) { + case EdmType.BINARY: + if (Buffer.isBuffer(value)) { + return value.toString('base64'); + } + return value; + case EdmType.DATETIME: + if (_.isDate(value)) { + return value.toISOString(); + } + return value; + case EdmType.GUID: + if (Buffer.isBuffer(value) || _.isArray(value)) { + return guid.unparse(value); + } + return value; + case EdmType.INT64: + case EdmType.DOUBLE: + return value.toString(); + case EdmType.INT32: + if (value === Number.POSITIVE_INFINITY) { + return 'Infinity'; + } + if (value === Number.NEGATIVE_INFINITY) { + return '-Infinity'; + } + if (azureutil.objectIsNaN(value)) { + return 'NaN'; + } + return value; + case EdmType.STRING: + case EdmType.BOOLEAN: + return value; + default: + throw new Error(SR.TYPE_NOT_SUPPORTED + type); + } +}; + +/* +* Determines if a type annotation is required for the input type when sending JSON data to the service. +*/ +exports.isTypeRequired = function(type, value) { + switch (type) { + case EdmType.BINARY: + case EdmType.INT64: + case EdmType.DATETIME: + case EdmType.GUID: + case EdmType.DOUBLE: + return true; + case EdmType.INT32: + if (typeof value !== 'number' || value === Number.POSITIVE_INFINITY || value === Number.NEGATIVE_INFINITY || (azureutil.objectIsNaN(value))) { + return true; + } + return false; + case EdmType.STRING: + case EdmType.BOOLEAN: + return false; + default: + throw new Error(util.format(SR.TYPE_NOT_SUPPORTED, type)); + } +}; + +/** +* Serializes value into proper value to be used in odata query value. +* +* @param {object} value The value to be serialized. +* @return {string} The serialized value. +*/ +exports.serializeQueryValue = function (value, type) { + var edmType = type || exports.propertyType(value, true); + switch (edmType) { + case EdmType.INT32: + return value.toString(); + case EdmType.BOOLEAN: + return value ? 
'true' : 'false'; + case EdmType.DOUBLE: + return value.toString(); + case EdmType.INT64: + return value.toString() + 'L'; + case EdmType.DATETIME: + if(_.isDate(value)) { + var dateTimeString = value.toISOString(); + return 'datetime\'' + dateTimeString + '\''; + } + throw new Error(util.format(SR.INVALID_EDM_TYPE, value, type)); + case EdmType.GUID: + return 'guid\'' + value.toString() + '\''; + case EdmType.BINARY: + return 'X\'' + value.toString('hex') + '\''; + default: + return '\'' + value.toString().replace(/'/g, '\'\'') + '\''; + } +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/table/internal/odatahandler.js b/src/node_modules/azure-storage/lib/services/table/internal/odatahandler.js new file mode 100644 index 0000000..0b64db0 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/internal/odatahandler.js @@ -0,0 +1,217 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var util = require('util'); + +var azureCommon = require('./../../../common/common.core'); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var Constants = azureCommon.Constants; +var edmHandler = require('./edmhandler'); + +var prefixLength = Constants.TableConstants.ODATA_PREFIX.length; +var suffixLength = Constants.TableConstants.ODATA_TYPE_SUFFIX.length; + +exports = module.exports; + +/* Serialize an entity to an Odata (Json based) payload +* Input must be in the following format: +* { stringValue: { '$': 'Edm.String', '_': 'my string' }, myInt: { '$': 'Edm.Int32', '_': 3 } } +*/ +exports.serializeJson = function (entity) { + function normalizeEntityProperty(property) { + if(azureutil.objectIsNull(property)) { + return { _: property }; + } + + if (typeof property === 'object' && property.hasOwnProperty(Constants.TableConstants.ODATA_VALUE_MARKER)) { + return property; + } + + var result = { _: property }; + result[Constants.TableConstants.ODATA_TYPE_MARKER] = edmHandler.propertyType(property, true); + + return result; + } + + var result = {}; + for (var propName in entity) { + // ignore if .metadata or null or undefined + if (propName !== Constants.TableConstants.ODATA_METADATA_MARKER) { + var property = normalizeEntityProperty(entity[propName]); + if (!azureutil.objectIsNull(property[Constants.TableConstants.ODATA_VALUE_MARKER])) { + var value = property[Constants.TableConstants.ODATA_VALUE_MARKER]; + var type = property[Constants.TableConstants.ODATA_TYPE_MARKER]; + + if (type === undefined) { + type = edmHandler.propertyType(value, true); + } + + result[propName] = edmHandler.serializeValue(type, value); + if (edmHandler.isTypeRequired(type, value)) { + result[propName + Constants.TableConstants.ODATA_TYPE_SUFFIX] = type; + } + } + } + } + + var replacer = function(key, value) { + if (value === Number.POSITIVE_INFINITY) { + return 'Infinity'; + } + if (value === Number.NEGATIVE_INFINITY) { + return '-Infinity'; + } + if 
(azureutil.objectIsNaN(value)) { + return 'NaN'; + } + return value; + }; + + return JSON.stringify(result, replacer); +}; + +/* +Input: The body of the HTTP response from the server from a table list as JSON (responseObject.response.body). + +Return: +This will return an array in the following format: + +[ + tableName1, + tableName2 +] + +For example, + +[ + myTable1, + myTable2 +] + +*/ +exports.parseJsonTables = function (response) { + var result = []; + + if (response.value) { + for (var i = 0; i < response.value.length; i++) { + var entity = response.value[i].TableName; + result.push(entity); + } + } + + return result; +}; + +/* +Input: The body of the HTTP response from the server from a table query as JSON (responseObject.response.body). + +Return: +This will return an array in the following format: + +[ + {{ '$': edmHandler1, '_': value1}, { '$': edmHandler2, '_': value2}, { '$': edmHandler3, '_': value3}}, + {{ '$': edmHandler4, '_': value4}, { '$': edmHandler5, '_': value5}, { '$': edmHandler6, '_': value6}} +] + +For example, + +[ + {{ '$': Edm.Int32, '_': 42}, { '$': Edm.String, '_': 'sample string'}, { '$': Edm.Boolean, '_': false}}, + {{ '$': Edm.Int64, '_': 42}, { '$': Edm.String, '_': 'sample string 2'}, { '$': Edm.Boolean, '_': true}} +] + +*/ +exports.parseJsonEntities = function (response, autoResolveProperties, propertyResolver, entityResolver) { + if (!response.value) { + return [exports.parseJsonSingleEntity(response, autoResolveProperties, propertyResolver, entityResolver)]; + } else { + var result = []; + + for (var i = 0; i < response.value.length; i++) { + var rawEntity = response.value[i]; + var entity = exports.parseJsonSingleEntity(rawEntity, autoResolveProperties, propertyResolver, entityResolver); + result.push(entity); + } + + return result; + } +}; + +exports.parseJsonSingleEntity = function(rawEntity, autoResolveProperties, propertyResolver, entityResolver) { + var rawEntityProperties = {}; + var entityPropertyTypes = 
{PartitionKey: 'Edm.String', RowKey: 'Edm.String', Timestamp: 'Edm.DateTime'}; + var odataMetadata = {}; + + // parse properties + for (var entityPropertyName in rawEntity) { + if (azureutil.stringStartsWith(entityPropertyName, Constants.TableConstants.ODATA_PREFIX)) { + odataMetadata[entityPropertyName.slice(prefixLength)] = rawEntity[entityPropertyName]; + } else if (azureutil.stringEndsWith(entityPropertyName, Constants.TableConstants.ODATA_TYPE_SUFFIX)) { + entityPropertyTypes[entityPropertyName.slice(0, entityPropertyName.length - suffixLength)] = rawEntity[entityPropertyName]; + } else { + rawEntityProperties[entityPropertyName] = rawEntity[entityPropertyName]; + } + } + + // make sure etag is set + if (!odataMetadata.etag && rawEntityProperties.Timestamp) { + var timestampString = Buffer.from(rawEntityProperties.Timestamp).toString(); + odataMetadata.etag = 'W/"datetime\'' + timestampString + '\'"'; + } + + var entity = {}; + for (var entityPropertyName in rawEntityProperties) { + if (rawEntityProperties.hasOwnProperty(entityPropertyName)) { + // set the type, if given in the response + var entityPropertyType = entityPropertyTypes[entityPropertyName]; + entity[entityPropertyName] = {}; + + // use the given property resolver if present, otherwise infer type if undefined + if (propertyResolver) { + // partition key, row key, name, value, type if present + entityPropertyType = propertyResolver(rawEntityProperties.PartitionKey, rawEntityProperties.RowKey, entityPropertyName, rawEntityProperties[entityPropertyName], entityPropertyType); + } + if (!entityPropertyType && autoResolveProperties) { + entityPropertyType = edmHandler.propertyType(rawEntityProperties[entityPropertyName], false); + } + + if (entityPropertyType) { + entity[entityPropertyName][Constants.TableConstants.ODATA_TYPE_MARKER] = entityPropertyType; + } + + try { + entity[entityPropertyName][Constants.TableConstants.ODATA_VALUE_MARKER] = edmHandler.deserializeValueFromJson(entityPropertyType, 
rawEntityProperties[entityPropertyName]); + } catch (err) { + if (propertyResolver) { + // if a property resolver was used and the type is invalid, throw an appropriate error + throw new Error(util.format(SR.INVALID_PROPERTY_RESOLVER, entityPropertyName, entityPropertyType, rawEntityProperties[entityPropertyName])); + } else { + throw err; + } + } + } + } + + entity[Constants.TableConstants.ODATA_METADATA_MARKER] = odataMetadata; + + if (entityResolver) { + entity = entityResolver(entity); + } + + return entity; +}; diff --git a/src/node_modules/azure-storage/lib/services/table/internal/requesthandler.js b/src/node_modules/azure-storage/lib/services/table/internal/requesthandler.js new file mode 100644 index 0000000..1dc416e --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/internal/requesthandler.js @@ -0,0 +1,135 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var util = require('util'); +var azureCommon = require('./../../../common/common.core'); +var WebResource = azureCommon.WebResource; +var SR = azureCommon.SR; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; +var TableConstants = Constants.TableConstants; +var entityResult = require('../models/entityresult'); + +exports = module.exports; + +/** +* Retrieves the entity path from the table name and an entity descriptor. 
+* @ignore +* +* @param {string} table The table name. +* @param {object} entity The entity descriptor. +* @return {string} The entity path. +*/ +function getEntityPath (tableName, partitionKey, rowKey) { + var path = '/' + tableName; + + if (typeof (partitionKey) === 'string' && typeof (rowKey) === 'string') { + // Escape single quotes according to OData Protocol Specification: "single quotes within string literals are represented as two consecutive single quotes". + partitionKey = partitionKey.replace(/'/g, '\'\''); + rowKey = rowKey.replace(/'/g, '\'\''); + path = path + '(PartitionKey=\'' + encodeURIComponent(partitionKey.toString('utf8')) + '\',RowKey=\'' + encodeURIComponent(rowKey.toString('utf8')) + '\')'; + } else { + throw new Error(SR.INCORRECT_ENTITY_KEYS); + } + + return path; +} + +/** +* Constructs the web resource for a table operation. +* +* @param {string} operation The operation to perform. +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The create options or callback function. +* @param {boolean} [options.checkEtag] Boolean value indicating weather the etag should be matched or not. +* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @return {webResource} +*/ +exports.constructEntityWebResource = function (operation, table, entityDescriptor, options) { + var webResource = null; + if (operation === TableConstants.Operations.INSERT) { + webResource = WebResource.post(table) + .withHeader(HeaderConstants.PREFER, options.echoContent ? HeaderConstants.PREFER_CONTENT : HeaderConstants.PREFER_NO_CONTENT); + } else { + var partitionKey; + var rowKey; + + if (typeof (entityDescriptor.PartitionKey) === 'string') { + partitionKey = entityDescriptor.PartitionKey; + } else { + partitionKey = entityDescriptor.PartitionKey[TableConstants.ODATA_VALUE_MARKER]; + } + + if (typeof (entityDescriptor.RowKey) === 'string') { + rowKey = entityDescriptor.RowKey; + } else { + rowKey = entityDescriptor.RowKey[TableConstants.ODATA_VALUE_MARKER]; + } + + var path = getEntityPath(table, partitionKey, rowKey); + + if (operation === TableConstants.Operations.DELETE) { + webResource = WebResource.del(path); + } else if (operation === TableConstants.Operations.MERGE || operation === TableConstants.Operations.INSERT_OR_MERGE) { + webResource = WebResource.merge(path); + } else if (operation === TableConstants.Operations.REPLACE || operation === TableConstants.Operations.INSERT_OR_REPLACE) { + webResource = WebResource.put(path); + } else if (operation === TableConstants.Operations.RETRIEVE) { + webResource = WebResource.get(path); + } else { + throw new Error(util.format(SR.INVALID_TABLE_OPERATION, operation)); + } + } + + if (operation === TableConstants.Operations.DELETE || operation === TableConstants.Operations.REPLACE || operation === 
TableConstants.Operations.MERGE) { + webResource.withHeader(HeaderConstants.IF_MATCH, entityResult.getEtag(entityDescriptor) || '*'); + } + + var entitySerializedDescriptor; + if (!(operation === TableConstants.Operations.DELETE || operation === TableConstants.Operations.RETRIEVE)) { + entitySerializedDescriptor = entityResult.serialize(entityDescriptor); + } + + exports.setTableRequestHeadersAndBody(webResource, entitySerializedDescriptor, options.payloadFormat); + + return webResource; +}; + +/** +* Sets the table request headers. +* +* @param {string} webResource The webResource to add headers to. +* @param {object} [body] The body of the request. +*/ +exports.setTableRequestHeadersAndBody = function (webResource, body, acceptType) { + if (body) { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body, 'utf8')) + .withBody(body) + .withHeader(HeaderConstants.CONTENT_TYPE, HeaderConstants.JSON_CONTENT_TYPE_VALUE); + } + + webResource.withHeader(HeaderConstants.ACCEPT, acceptType) + .withHeader(HeaderConstants.MAX_DATA_SERVICE_VERSION, TableConstants.DEFAULT_DATA_SERVICE_VERSION); +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/table/internal/sharedkeytable.js b/src/node_modules/azure-storage/lib/services/table/internal/sharedkeytable.js new file mode 100644 index 0000000..e84dd9e --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/internal/sharedkeytable.js @@ -0,0 +1,92 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var util = require('util'); +var azureCommon = require('./../../../common/common.core'); +var SharedKey = azureCommon.SharedKey; +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; + +/** +* Creates a new SharedKeyTable object. +* +* @constructor +* @param {string} storageAccount The storage account. +* @param {string} storageAccessKey The storage account's access key. +* @param {bool} usePathStyleUri Boolean value indicating if the path, or the hostname, should include the storage account. +*/ +function SharedKeyTable(storageAccount, storageAccessKey, usePathStyleUri) { + SharedKeyTable['super_'].call(this, + storageAccount, + storageAccessKey, + usePathStyleUri); +} + +util.inherits(SharedKeyTable, SharedKey); + +/** +* Signs a request with the Authentication header. +* +* @param {WebResource} The webresource to be signed. +* @param {function(error)} callback The callback function. 
+*/ +SharedKeyTable.prototype.signRequest = function (webResource, callback) { + var getvalueToAppend = function (value) { + if (azureutil.objectIsNull(value)) { + return '\n'; + } else { + return value + '\n'; + } + }; + + var stringToSign = + webResource.method + '\n' + + getvalueToAppend(webResource.headers[HeaderConstants.CONTENT_MD5]) + + getvalueToAppend(webResource.headers[HeaderConstants.CONTENT_TYPE]) + + getvalueToAppend(webResource.headers[HeaderConstants.MS_DATE]) + + this._getCanonicalizedResource(webResource); + + var signature = this.signer.sign(stringToSign); + + webResource.withHeader(HeaderConstants.AUTHORIZATION, 'SharedKey ' + this.storageAccount + ':' + signature); + callback(null); +}; + +/* +* Retrieves the webresource's canonicalized resource string. +* @param {WebResource} webResource The webresource to get the canonicalized resource string from. +* @return {string} The canonicalized resource string. +*/ +SharedKeyTable.prototype._getCanonicalizedResource = function (webResource) { + var path = '/'; + if (webResource.path) { + path = webResource.path; + } + + var canonicalizedResource = '/' + this.storageAccount + path; + + var queryStringValues = webResource.queryString; + if (queryStringValues[QueryStringConstants.COMP]) { + canonicalizedResource += '?comp=' + queryStringValues[QueryStringConstants.COMP]; + } + + return canonicalizedResource; +}; + +module.exports = SharedKeyTable; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/table/models/batchresult.js b/src/node_modules/azure-storage/lib/services/table/models/batchresult.js new file mode 100644 index 0000000..88a5491 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/models/batchresult.js @@ -0,0 +1,237 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = require('./../../../common/common.core'); +var azureutil = azureCommon.util; +var Md5Wrapper = require('./../../../common/md5-wrapper'); +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; +var TableConstants = Constants.TableConstants; + +var RequestHandler = require('../internal/requesthandler'); +var entityResult = require('./entityresult'); + +/** +* Creates a new BatchResult. +* +* @param {TableService} tableService The table service. +* @param {string} table The table name. +* @param {array} operations The array of batch operations. +* @constructor +* @ignore +*/ +function BatchResult(tableService, table, operations) { + this.tableService = tableService; + this.table = table; + this.operations = operations; + this.batchBoundary = 'batch_' + BatchResult._getBoundary(); + this.changesetBoundary = 'changeset_' + BatchResult._getBoundary(); +} + +/** +* Gets a boundary string. +* +* @return {string} The boundary string. +* @ignore +*/ +BatchResult._getBoundary = function () { + return (new Md5Wrapper().createMd5Hash()).update('' + (new Date()).getTime()).digest('hex'); +}; + +/** +* Constructs the batch web request. +* +* @return {WebResource} The batch WebResource. 
+* @ignore +*/ +BatchResult.prototype.constructWebResource = function () { + var webResource = WebResource.post('$batch') + .withRawResponse(true); + + webResource.withHeader(HeaderConstants.CONTENT_TYPE, 'multipart/mixed; charset="utf-8"; boundary=' + this.batchBoundary); + webResource.withHeader(HeaderConstants.DATA_SERVICE_VERSION, '3.0;'); + webResource.withHeader(HeaderConstants.MAX_DATA_SERVICE_VERSION, '3.0;NetFx'); + + return webResource; +}; + +/** +* Serializes the batch web body. +* +* @return {string} The serialized batch content. +* @ignore +*/ +BatchResult.prototype.serialize = function () { + var body = '--' + this.batchBoundary + '\n'; + + if (this.operations.length === 1 && this.operations[0].type === TableConstants.Operations.RETRIEVE) { + body += HeaderConstants.CONTENT_TYPE + ': application/http\n'; + body += HeaderConstants.CONTENT_TRANSFER_ENCODING + ': binary\n\n'; + body += this._serializeOperation(this.operations[0]); + } else { + body += HeaderConstants.CONTENT_TYPE + ': multipart/mixed;charset="utf-8";boundary=' + this.changesetBoundary + '\n\n'; + + for (var i = 0; i < this.operations.length; i++) { + body += '--' + this.changesetBoundary + '\n'; + body += HeaderConstants.CONTENT_TYPE + ': application/http\n'; + body += HeaderConstants.CONTENT_TRANSFER_ENCODING + ': binary\n\n'; + body += this._serializeOperation(this.operations[i], i) + '\n'; + } + body += '--' + this.changesetBoundary + '--\n'; + } + body += '--' + this.batchBoundary + '--'; + + return body; +}; + +/** +* Serializes a request within the batch. +* +* @param {object} The operation to serialize. +* @param {number} The index of the operation in the operations arrray. +* @return {string} The serialized operation content. 
+* @ignore +*/ +BatchResult.prototype._serializeOperation = function (operation, count) { + operation.options.payloadFormat = operation.options.payloadFormat || this.tableService.defaultPayloadFormat; + var webResource = RequestHandler.constructEntityWebResource(operation.type, this.table, operation.entity, operation.options); + + if (count) { + webResource.headers[HeaderConstants.CONTENT_ID] = count; + } + + var contentType = webResource.headers[HeaderConstants.CONTENT_TYPE]; + if (contentType) { + if (!azureutil.stringEndsWith(contentType, ';')) { + webResource.headers[HeaderConstants.CONTENT_TYPE] += ';'; + } + webResource.headers[HeaderConstants.CONTENT_TYPE] += 'type=entry'; + } + + this.tableService._setRequestUrl(webResource); + + var content = webResource.method + ' ' + webResource.uri + ' HTTP/1.1\n'; + + Object.keys(webResource.headers).forEach(function (header) { + content += header + ': ' + webResource.headers[header] + '\n'; + }); + + content += '\n'; + content += webResource.body || ''; + + return content; +}; + +/** +* Parses a batch response. +* +* @param {object} responseObject The response object for the batch request. +* @return {array} An array with the processed / parsed responses. 
+*/ +BatchResult.prototype.parse = function (responseObject) { + var responses = null; + if (responseObject && responseObject.response && responseObject.response.body && + typeof responseObject.response.body === 'string') { + responses = []; + var rawResponses = responseObject.response.body.split(TableConstants.CHANGESET_DELIMITER); + + if(rawResponses.length === 1) { + rawResponses = responseObject.response.body.split(TableConstants.BATCH_DELIMITER); + } + + var self = this; + rawResponses.forEach(function (rawResponse) { + // Find HTTP/1.1 CODE line + var httpLocation = rawResponse.indexOf('HTTP/1.1'); + if (httpLocation !== -1) { + rawResponse = rawResponse.substring(httpLocation); + + // valid response + var response = self._parseOperation(rawResponse); + responses.push(response); + } + }); + } + + return responses; +}; + +/** +* Parses a partial response. +* +* @param {string} rawResponse The raw, unparsed, http response from the server for the batch response. +* @return {object} A response object. 
+*/ +BatchResult.prototype._parseOperation = function (rawResponse) { + var responseObject = { + error: null, + response: { } + }; + + // Split into multiple lines and process them + var responseLines = rawResponse.split('\r\n'); + + if (responseLines.length > 0) { + // Retrieve response code + var headers = responseLines.shift().split(' '); + if (headers.length >= 2) { + responseObject.response.statusCode = parseInt(headers[1]); + responseObject.response.isSuccessful = WebResource.validResponse(responseObject.response.statusCode); + } + + // Populate headers + responseObject.response.headers = { }; + responseObject.response.body = ''; + + var isBody = false; + responseLines.forEach(function (line) { + if (line === '' && !isBody) { + isBody = true; + } else if (isBody) { + responseObject.response.body += line; + } else { + var headerSplit = line.indexOf(':'); + if (headerSplit !== -1) { + responseObject.response.headers[line.substring(0, headerSplit).trim().toLowerCase()] = line.substring(headerSplit + 1).trim(); + } + } + }); + + StorageServiceClient._parseResponse(responseObject.response, this.tableService.xml2jsSettings); + if (!responseObject.response.isSuccessful) { + responseObject.error = StorageServiceClient._normalizeError(responseObject.response.body, responseObject.response); + } + + if (!responseObject.error) { + var index = responseObject.response.headers[HeaderConstants.CONTENT_ID] || 0; + var propertyResolver; + var entityResolver; + if (index && this.operations[index]) { + var options = this.operations[index].options; + propertyResolver = options.propertyResolver; + entityResolver = options.entityResolver; + } + responseObject.entity = entityResult.parseEntity(responseObject.response, propertyResolver, entityResolver); + } + } + + return responseObject; +}; + +module.exports = BatchResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/table/models/entityresult.js 
b/src/node_modules/azure-storage/lib/services/table/models/entityresult.js new file mode 100644 index 0000000..f703e1f --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/models/entityresult.js @@ -0,0 +1,62 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = require('./../../../common/common.core'); +var Constants = azureCommon.Constants; +var TableConstants = Constants.TableConstants; +var HeaderConstants = Constants.HeaderConstants; +var odataHandler = require('../internal/odatahandler'); + +exports = module.exports; + +exports.serialize = function (entity) { + return odataHandler.serializeJson(entity); +}; + +exports.parseQuery = function (response, autoResolveProperties, propertyResolver, entityResolver) { + var result = {}; + if (response.body) { + result = odataHandler.parseJsonEntities(response.body, autoResolveProperties, propertyResolver, entityResolver); + } + + return result; +}; + +exports.parseEntity = function (response, autoResolveProperties, propertyResolver, entityResolver) { + var result = {}; + if (response.body) { + result = odataHandler.parseJsonSingleEntity(response.body, autoResolveProperties, propertyResolver, entityResolver); + } + + if (response.headers && response.headers[HeaderConstants.ETAG.toLowerCase()]) { + if (!result[TableConstants.ODATA_METADATA_MARKER]) { + 
result[TableConstants.ODATA_METADATA_MARKER] = {}; + } + + result[TableConstants.ODATA_METADATA_MARKER].etag = response.headers[HeaderConstants.ETAG.toLowerCase()]; + } + + return result; +}; + +exports.getEtag = function (entity) { + var etag; + if (entity && entity[TableConstants.ODATA_METADATA_MARKER]) { + etag = entity[TableConstants.ODATA_METADATA_MARKER].etag; + } + return etag; +}; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/table/models/tableresult.js b/src/node_modules/azure-storage/lib/services/table/models/tableresult.js new file mode 100644 index 0000000..46adb04 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/models/tableresult.js @@ -0,0 +1,37 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var odataHandler = require('../internal/odatahandler'); + +function TableResult(name) { + this.name = name; +} + +TableResult.serialize = function (tableName) { + return JSON.stringify({ TableName: tableName }); +}; + +TableResult.parse = function (response) { + var result = null; + if (response.body) { + result = odataHandler.parseJsonTables(response.body); + } + + return result; +}; + +exports = module.exports = TableResult; \ No newline at end of file diff --git a/src/node_modules/azure-storage/lib/services/table/tablebatch.js b/src/node_modules/azure-storage/lib/services/table/tablebatch.js new file mode 100644 index 0000000..64d0861 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/tablebatch.js @@ -0,0 +1,209 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var extend = require('extend'); + +var azureCommon = require('./../../common/common.core'); +var SR = azureCommon.SR; +var validate = azureCommon.validate; +var Constants = azureCommon.Constants; +var TableConstants = Constants.TableConstants; + +/** +* Creates a new TableBatch. +* +* @constructor +*/ +function TableBatch() { + this.operations = []; + this.pk = null; + this.retrieve = false; +} + +/** +* Removes all of the operations from the batch. 
+*/ +TableBatch.prototype.clear = function () { + this.operations = []; +}; + +/** +* Returns a boolean value indicating weather there are operations in the batch. +* +* @return {Boolean} True if there are operations queued up; false otherwise. +*/ +TableBatch.prototype.hasOperations = function () { + return this.operations.length > 0; +}; + +/** +* Returns the number of operations in the batch. +* +* @return {number} The number of operations in the batch. +*/ +TableBatch.prototype.size = function () { + return this.operations.length; +}; + +/** +* Adds a retrieve operation to the batch. Note that this must be the only operation in the batch. +* +* @param {string} partitionKey The partition key. +* @param {string} rowKey The row key. +* @param {object} [options] The request options. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, +* and the property Edm type if given by the service, returns the Edm type of the property. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Given the single entity returned by the query, returns a modified object. +*/ +TableBatch.prototype.retrieveEntity = function (partitionKey, rowKey, options) { + var entity = { PartitionKey: {_: partitionKey, $: 'Edm.String'}, + RowKey: {_: rowKey, $: 'Edm.String'}, + }; + this.addOperation(TableConstants.Operations.RETRIEVE, entity, options); +}; + +/** +* Adds an insert operation to the batch. +* +* @param {object} entity The entity. +* @param {object} [options] The request options. +* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Inserts only, default to false. +* @param {string} [options.payloadFormat] The payload format to use for the request. 
+* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Only applied if echoContent is true. Given the partition key, row key, property name, +* property value, and the property Edm type if given by the service, returns the Edm type of the property. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Only applied if echoContent is true. Given the single entity returned by the insert, returns +* a modified object. +*/ +TableBatch.prototype.insertEntity = function (entity, options) { + this.addOperation(TableConstants.Operations.INSERT, entity, options); +}; + +/** +* Adds a delete operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.deleteEntity = function (entity) { + this.addOperation(TableConstants.Operations.DELETE, entity); +}; + +/** +* Adds a merge operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.mergeEntity = function (entity) { + this.addOperation(TableConstants.Operations.MERGE, entity); +}; + +/** +* Adds an replace operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.replaceEntity = function (entity) { + this.addOperation(TableConstants.Operations.REPLACE, entity); +}; + +/** +* Adds an insert or replace operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.insertOrReplaceEntity = function (entity) { + this.addOperation(TableConstants.Operations.INSERT_OR_REPLACE, entity); +}; + +/** +* Adds an insert or merge operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.insertOrMergeEntity = function (entity) { + this.addOperation(TableConstants.Operations.INSERT_OR_MERGE, entity); +}; + +/** +* Adds an operation to the batch after performing checks. +* +* @param {string} operationType The type of operation to perform. See Constants.TableConstants.Operations +* @param {object} entity The entity. 
+* @param {object} [options] The request options. +*/ +TableBatch.prototype.addOperation = function (operationType, entity, options) { + validate.validateArgs('addOperation', function (v) { + v.object(entity, 'entity'); + v.object(entity.PartitionKey, 'entity.PartitionKey'); + v.object(entity.RowKey, 'entity.RowKey'); + v.stringAllowEmpty(entity.PartitionKey._, 'entity.PartitionKey._'); + v.stringAllowEmpty(entity.RowKey._, 'entity.RowKey._'); + }); + + if(this.operations.length >= 100) { + throw new Error(SR.BATCH_TOO_LARGE); + } + + if (operationType === TableConstants.Operations.RETRIEVE) { + if(this.hasOperations()) { + throw new Error(SR.BATCH_ONE_RETRIEVE); + } else { + this.retrieve = true; + } + } else if (this.retrieve) { + throw new Error(SR.BATCH_ONE_RETRIEVE); + } + + if (!this.hasOperations()) { + this.pk = entity.PartitionKey._; + } else if (entity.PartitionKey._ !== this.pk) { + throw new Error(SR.BATCH_ONE_PARTITION_KEY); + } + + var copiedOptions = extend(true, {}, options); + this.operations.push({type: operationType, entity: entity, options: copiedOptions}); +}; + +/** +* Gets an operation from the batch. Returns null if the index does not exist. +* +* @param {number} index The index in the operations array at which to remove an element. +* @return {object} The removed operation. +*/ +TableBatch.prototype.getOperation = function (index) { + return this.operations[index]; +}; + +/** +* Removes an operation from the batch. Returns null if the index does not exist. +* +* @param {number} index The index in the operations array at which to remove an element. +* @return {object} The removed operation. 
+*/ +TableBatch.prototype.removeOperation = function (index) { + var operation = this.operations.splice(index, 1)[0]; + + // if the array is empty, unlock the partition key + if (!this.hasOperations()) { + this.pk = null; + this.retrieve = false; + } + + return operation; +}; + +module.exports = TableBatch; diff --git a/src/node_modules/azure-storage/lib/services/table/tablequery.js b/src/node_modules/azure-storage/lib/services/table/tablequery.js new file mode 100644 index 0000000..f24794e --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/tablequery.js @@ -0,0 +1,419 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = require('underscore'); +var util = require('util'); + +var azureCommon = require('./../../common/common.core'); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var QueryStringConstants = azureCommon.Constants.QueryStringConstants; + +var edmHandler = require('./internal/edmhandler'); +var TableUtilities = require('./tableutilities'); +var QueryComparisons = TableUtilities.QueryComparisons; +var TableOperators = TableUtilities.TableOperators; +var EdmType = TableUtilities.EdmType; + +/** + * Creates a new TableQuery object. + * + * @constructor + */ +function TableQuery() { + this._fields = []; + this._where = []; + this._top = null; +} + +/** +* Specifies the select clause. 
If no arguments are given, all fields will be selected. +* +* @param {array} fields The fields to be selected. +* @return {TableQuery} A table query object with the select clause. +* @example +* var tableQuery = new TableQuery().select('field1', 'field2'); +*/ +TableQuery.prototype.select = function () { + var self = this; + if (arguments) { + _.each(arguments, function (argument) { + self._fields.push(argument); + }); + } + + return this; +}; + +/** + * Specifies the top clause. + * + * @param {int} top The number of items to fetch. + * @return {TableQuery} A table query object with the top clause. + * @example + * var tableQuery = new TableQuery().top(10); + * + * // tasktable should already exist and have entities + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result) { + * if(!error) { + * var entities = result.entities; // there will be 10 or less entities + * // do stuff with the returned entities if there are any + * // if result.continuationToken exists, to get the next 10 (or less) entities + * // call queryEntities as above, but with the returned token instead of null + * } + * }); + */ +TableQuery.prototype.top = function (top) { + this._top = top; + return this; +}; + +/** + * Specifies the where clause. + * + * Valid type specifier strings include: ?string?, ?bool?, ?int32?, ?double?, ?date?, ?guid?, ?int64?, ?binary? + * A type must be specified for guid, int64, and binaries or the filter produced will be incorrect. + * + * @param {string} condition The condition string. + * @param {string|array} value Value(s) to insert in question mark (?) parameters. + * @return {TableQuery} A table query object with the where clause. + * @example + * var tableQuery = new TableQuery().where(TableQuery.guidFilter('GuidField', QueryComparisons.EQUAL, guidVal)); + * OR + * var tableQuery = new TableQuery().where('Name == ? 
or Name <= ?', name1, name2); + * OR + * var tableQuery = new TableQuery().where('Name == ?string? && Value == ?int64?', name1, int64Val); + * + * // tasktable should already exist and have entities + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result, response) { + * if(!error) { + * var entities = result.entities; + * // do stuff with the returned entities if there are any + * } + * }); + */ +TableQuery.prototype.where = function (condition) { + this._where.push(TableQuery._encodeConditionString(condition, arguments)); + return this; +}; + +/** + * Generates a property filter condition string for an 'int' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|int} value An 'int' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.int32Filter('IntField', QueryComparisons.EQUAL, 5); + */ +TableQuery.int32Filter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.INT32); +}; + +/** + * Generates a property filter condition string for a 'int64' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|int64} value An 'int64' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. 
+ * @example + * var query = TableQuery.int64Filter('Int64Field', QueryComparisons.EQUAL, 123); + */ +TableQuery.int64Filter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.INT64); +}; + +/** + * Generates a property filter condition string for a 'double' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|double}value A 'double' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.doubleFilter('DoubleField', QueryComparisons.EQUAL, 123.45); + */ +TableQuery.doubleFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.DOUBLE); +}; + +/** + * Generates a property filter condition string for a 'boolean' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|boolean} value A 'boolean' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.booleanFilter('BooleanField', QueryComparisons.EQUAL, false); + */ +TableQuery.booleanFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.BOOLEAN); +}; + +/** + * Generates a property filter condition string for a 'datetime' value. + * + * @param {string} propertyName A string containing the name of the property to compare. 
+ * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|date} value A 'datetime' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.dateFilter('DateTimeField', QueryComparisons.EQUAL, new Date(Date.UTC(2001, 1, 3, 4, 5, 6))); + */ +TableQuery.dateFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.DATETIME); +}; + +/** + * Generates a property filter condition string for a 'guid' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|guid} value A 'guid' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.guidFilter('GuidField', QueryComparisons.EQUAL, guid.v1()); + */ +TableQuery.guidFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.GUID); +}; + +/** + * Generates a property filter condition string for a 'binary' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|buffer}value A 'buffer' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. 
+ * @example + * var query = TableQuery.binaryFilter('BinaryField', QueryComparisons.EQUAL, Buffer.from('hello')); + */ +TableQuery.binaryFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.BINARY); +}; + +/** + * Generates a property filter condition string. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string} value A 'string' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.stringFilter('StringField', QueryComparisons.EQUAL, 'name'); + */ +TableQuery.stringFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.STRING); +}; + +/** + * Creates a filter condition using the specified logical operator on two filter conditions. + * + * @param {string} filterA A string containing the first formatted filter condition. + * @param {string} operatorString A string containing the operator to use (AND, OR). + * @param {string} filterB A string containing the second formatted filter condition. + * @return {string} A string containing the combined filter expression. + * @example + * var filter1 = TableQuery.stringFilter('Name', QueryComparisons.EQUAL, 'Person'); + * var filter2 = TableQuery.booleanFilter('Visible', QueryComparisons.EQUAL, true); + * var combinedFilter = TableQuery.combineFilters(filter1, TableUtilities.TableOperators.AND, filter2); + */ +TableQuery.combineFilters = function (filterA, operatorString, filterB) { + return filterA + ' ' + operatorString + ' ' + filterB; +}; + +/** + * Specifies an AND where condition. + * + * @param {string} condition The condition string. 
+ * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the and clause. + * @example + * var tableQuery = new TableQuery() + * .where('Name == ? or Name <= ?', 'Person1', 'Person2'); + * .and('Age >= ?', 18); + */ +TableQuery.prototype.and = function (condition) { + if (this._where.length === 0) { + throw new Error(util.format(SR.QUERY_OPERATOR_REQUIRES_WHERE, 'AND')); + } + + this._where.push(' and ' + TableQuery._encodeConditionString(condition, arguments)); + return this; +}; + +/** + * Specifies an OR where condition. + * + * @param {string} condition The condition. + * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the or clause. + * @example + * var tableQuery = new TableQuery() + * .where('Name == ? or Name <= ?', 'Person1', 'Person2'); + * .or('Age >= ?', 18); + */ +TableQuery.prototype.or = function (condition) { + if (this._where.length === 0) { + throw new Error(util.format(SR.QUERY_OPERATOR_REQUIRES_WHERE, 'OR')); + } + + this._where.push(' or ' + TableQuery._encodeConditionString(condition, arguments)); + return this; +}; + +/** + * Returns the query string object for the query. + * + * @return {object} JSON object representing the query string arguments for the query. + */ +TableQuery.prototype.toQueryObject = function () { + var query = {}; + if (this._fields.length > 0) { + query[QueryStringConstants.SELECT] = this._fields.join(','); + } + + if (this._where.length > 0) { + query[QueryStringConstants.FILTER] = this._where.join(''); + } + + if (this._top) { + query[QueryStringConstants.TOP] = this._top; + } + + return query; +}; + +// Functions + +/** +* Concat the filter string parameters. +* +* @param {string} propertyName A string containing the name of the property to compare. 
+* @param {string} operation A string containing the comparison operator to use. +* See Constants.TableConstants.QueryComparisons for a list of allowed operations. +* @param {object} value The value to compare with the property. +* @param {string} type A string EdmType of the property to compare. +* @return {string} A string containing the formatted filter condition. +* @ignore +*/ +TableQuery._concatFilterString = function (propertyName, operation, value, type) { + if (azureutil.objectIsNull(propertyName)) { + throw new Error(util.format(SR.ARGUMENT_NULL_OR_UNDEFINED, 'propertyName')); + } + + if (azureutil.objectIsNull(operation)) { + throw new Error(util.format(SR.ARGUMENT_NULL_OR_UNDEFINED, 'operation')); + } + + if (azureutil.objectIsNull(value)) { + throw new Error(util.format(SR.ARGUMENT_NULL_OR_UNDEFINED, 'value')); + } + + var serializedValue = edmHandler.serializeQueryValue(value, type); + return propertyName + ' ' + operation + ' ' + serializedValue; +}; + +/** + * Encodes a condition string. + * + * @param {string} condition The condition. + * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the or clause + * @ignore + */ +TableQuery._encodeConditionString = function (condition, args) { + var encodedCondition = TableQuery._replaceOperators(condition); + if (args.length > 1) { + var sections = encodedCondition.split(/(\?string\?|\?int32\?|\?int64\?|\?bool\?|\?double\?|\?date\?|\?binary\?|\?guid\?|\?)/); + var count = 1; + for (var i = 0; i < sections.length && count < args.length; i++) { + if (sections[i].indexOf('?') === 0) { + var type = TableQuery._getEdmType(sections[i]); + sections[i] = edmHandler.serializeQueryValue(args[count], type); + count++; + } + } + encodedCondition = sections.join(''); + } + + return encodedCondition; +}; + +/** + * Converts the query string type to an Edm type. 
+ * + * @param {string} type The type included in the query string. + * @return {string} The EdmType. + * @ignore + */ +TableQuery._getEdmType = function (type) { + switch (type) { + case '?binary?': + return EdmType.BINARY; + case '?int64?': + return EdmType.INT64; + case '?date?': + return EdmType.DATETIME; + case '?guid?': + return EdmType.GUID; + case '?int32?': + return EdmType.INT32; + case '?double?': + return EdmType.DOUBLE; + case '?bool?': + return EdmType.BOOLEAN; + case '?string?': + return EdmType.STRING; + default: + return undefined; + } +}; + +/** + * Replace operators. + * @ignore + * @param {string} whereClause The text where to replace the operators. + * @return {string} The string with the replaced operators. + * @ignore + */ +TableQuery._replaceOperators = function (whereClause) { + var encodedWhereClause = whereClause.replace(/ == /g, ' ' + QueryComparisons.EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ != /g, ' ' + QueryComparisons.NOT_EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ >= /g, ' ' + QueryComparisons.GREATER_THAN_OR_EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ > /g, ' ' + QueryComparisons.GREATER_THAN + ' '); + encodedWhereClause = encodedWhereClause.replace(/ <= /g, ' ' + QueryComparisons.LESS_THAN_OR_EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ < /g, ' ' + QueryComparisons.LESS_THAN + ' '); + encodedWhereClause = encodedWhereClause.replace(/ \&\& /g, ' ' + TableOperators.AND + ' '); + encodedWhereClause = encodedWhereClause.replace(/ \|\| /g, ' ' + TableOperators.OR + ' '); + encodedWhereClause = encodedWhereClause.replace(/!/g, TableOperators.NOT); + + return encodedWhereClause; +}; + +module.exports = TableQuery; diff --git a/src/node_modules/azure-storage/lib/services/table/tableservice.js b/src/node_modules/azure-storage/lib/services/table/tableservice.js new file mode 100644 index 0000000..08e9c08 --- /dev/null +++ 
b/src/node_modules/azure-storage/lib/services/table/tableservice.js @@ -0,0 +1,1393 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var util = require('util'); +var extend = require('extend'); +var _ = require('underscore'); + +var azureCommon = require('./../../common/common.core'); +var azureutil = azureCommon.util; +var validate = azureCommon.validate; +var SR = azureCommon.SR; +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKeyTable = require('./internal/sharedkeytable'); +var RequestHandler = require('./internal/requesthandler'); +var TableQuery = require('./tablequery'); +var WebResource = azureCommon.WebResource; +var Constants = azureCommon.Constants; +var QueryStringConstants = Constants.QueryStringConstants; +var HeaderConstants = Constants.HeaderConstants; +var TableConstants = Constants.TableConstants; +var RequestLocationMode = Constants.RequestLocationMode; + +// Models requires +var TableResult = require('./models/tableresult'); +var entityResult = require('./models/entityresult'); +var BatchResult = require('./models/batchresult'); +var ServiceStatsParser = azureCommon.ServiceStatsParser; +var AclResult = azureCommon.AclResult; +var TableUtilities = require('./tableutilities'); + +/** +* Creates a new TableService object. 
+* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The TableService object allows you to peform management operations with the Microsoft Azure Table Service. +* The Table Service stores data in rows of key-value pairs. A table is composed of multiple rows, and each row +* contains key-value pairs. There is no schema, so each row in a table may store a different set of keys. +* +* For more information on the Table Service, as well as task focused information on using it from a Node.js application, see +* [How to Use the Table Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-table-storage/). +* The following defaults can be set on the Table service. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Table service. +* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Table service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Table service. +* defaultLocationMode The default location mode for requests made via the Table service. +* defaultPayloadFormat The default payload format for requests made via the Table service. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Table service.; true to use the +* Nagle algorithm; otherwise, false. The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). +* @constructor +* @extends {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. 
+* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {string} [endpointSuffix] The endpoint suffix. +*/ +function TableService(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix); + + TableService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._tableEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken); + + if (this.anonymous) { + throw new Error(SR.ANONYMOUS_ACCESS_BLOBSERVICE_ONLY); + } + + if(this.storageAccount && this.storageAccessKey) { + this.storageCredentials = new SharedKeyTable(this.storageAccount, this.storageAccessKey, this.usePathStyleUri); + } + + this.defaultPayloadFormat = TableUtilities.PayloadFormat.MINIMAL_METADATA; +} + +util.inherits(TableService, StorageServiceClient); + +// Table service methods + +/** +* Gets the service stats for a storage account’s Table service. +* +* @this {TableService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link ServiceStats}` will contain the stats. +* `response` will contain information related to this operation. +*/ +TableService.prototype.getServiceStats = function (optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getServiceStats', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'stats') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + var processResponseCallback = function (responseObject, next) { + responseObject.serviceStatsResult = null; + if (!responseObject.error) { + responseObject.serviceStatsResult = ServiceStatsParser.parse(responseObject.response.body.StorageServiceStats); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.serviceStatsResult, returnObject.response); + }; + + // call the first filter + next(responseObject, 
finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the properties of a storage account’s Table service, including Azure Storage Analytics. +* +* @this {TableService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link ServiceProperties}` will contain the properties. +* `response` will contain information related to this operation. +*/ +TableService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Sets the properties of a storage account’s Table service, including Azure Storage Analytics. 
+* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {TableService} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +TableService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of table items under the specified account. +* +* @this {TableService} +* @param {object} currentToken A continuation token returned by a previous listing operation. 
Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The create options or callback function. +* @param {int} [options.maxResults] Specifies the maximum number of tables to return per call to Azure ServiceClient. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.listTablesSegmented = function (currentToken, optionsOrCallback, callback) { + this.listTablesSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of table items under the specified account. +* +* @this {TableService} +* @param {string} prefix The prefix of the table name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The create options or callback function. +* @param {int} [options.maxResults] Specifies the maximum number of tables to return per call to Azure ServiceClient. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +TableService.prototype.listTablesSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listTables', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.payloadFormat = options.payloadFormat || this.defaultPayloadFormat; + + var webResource = WebResource.get(TableConstants.TABLE_SERVICE_TABLE_NAME); + RequestHandler.setTableRequestHeadersAndBody(webResource, null, options.payloadFormat); + + if(!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(TableConstants.NEXT_TABLE_NAME, currentToken.nextTableName); + } + + if(!azureutil.objectIsNull(prefix)) { + var query = new TableQuery() + .where(TableConstants.TABLE_NAME + ' >= ?', prefix) + .and(TableConstants.TABLE_NAME + ' < ?', prefix + '{'); + + webResource.withQueryOption(QueryStringConstants.FILTER, query.toQueryObject().$filter); + } + + if(!azureutil.objectIsNull(options.maxResults)) { + var query = new TableQuery().top(options.maxResults); + webResource.withQueryOption(QueryStringConstants.TOP, query.toQueryObject().$top); + } + + options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listTablesResult = null; + + if (!responseObject.error) { + responseObject.listTablesResult = { + entries: null, + continuationToken: null + }; + responseObject.listTablesResult.entries = TableResult.parse(responseObject.response); + + if 
(responseObject.response.headers[TableConstants.CONTINUATION_NEXT_TABLE_NAME] && + !azureutil.objectIsEmpty(responseObject.response.headers[TableConstants.CONTINUATION_NEXT_TABLE_NAME])) { + responseObject.listTablesResult.continuationToken = { + nextTableName: null, + targetLocation: null + }; + + responseObject.listTablesResult.continuationToken.nextTableName = responseObject.response.headers[TableConstants.CONTINUATION_NEXT_TABLE_NAME]; + responseObject.listTablesResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listTablesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Table Methods + +/** +* Gets the table's ACL. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the ACL information for the table. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.getTableAcl = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getTableAcl', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var webResource = WebResource.get(table) + .withQueryOption(QueryStringConstants.COMP, 'acl'); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResult = null; + if (!responseObject.error) { + responseObject.tableResult = new TableResult(table); + responseObject.tableResult.signedIdentifiers = AclResult.parse(responseObject.response.body); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates the table's ACL. +* +* @this {TableService} +* @param {string} table The table name. +* @param {Object.} signedIdentifiers The table ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain information for the table. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.setTableAcl = function (table, signedIdentifiers, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setTableAcl', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var policies = null; + if (signedIdentifiers) { + if(_.isArray(signedIdentifiers)) { + throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS); + } + policies = AclResult.serialize(signedIdentifiers); + } + + var webResource = WebResource.put(table) + .withQueryOption(QueryStringConstants.COMP, 'acl') + .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0) + .withBody(policies); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResult = null; + if (!responseObject.error) { + + // SetTableAcl doesn't actually return anything in the response + responseObject.tableResult = new TableResult(table); + if (signedIdentifiers) { + responseObject.tableResult.signedIdentifiers = signedIdentifiers; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieves a shared access signature token. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). 
+* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used). +* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @param {string} [sharedAccessPolicy.AccessPolicy.StartPk] The starting Partition Key for which the SAS will be valid. +* @param {string} [sharedAccessPolicy.AccessPolicy.EndPk] The ending Partition Key for which the SAS will be valid. +* @param {string} [sharedAccessPolicy.AccessPolicy.StartRk] The starting Row Key for which the SAS will be valid. +* @param {string} [sharedAccessPolicy.AccessPolicy.EndRk] The ending Row Key for which the SAS will be valid. +* @return {object} An object with the shared access signature. +*/ +TableService.prototype.generateSharedAccessSignature = function (table, sharedAccessPolicy) { + // check if the TableService is able to generate a shared access signature + if (!this.storageCredentials || !this.storageCredentials.generateSignedQueryString) { + throw new Error(SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY); + } + + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.object(sharedAccessPolicy, 'sharedAccessPolicy'); + }); + + var lowerCasedTableName = table.toLowerCase(); + return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.Table, lowerCasedTableName, sharedAccessPolicy, null, { tableName: lowerCasedTableName }); +}; + +/** +* Checks whether or not a table exists on the service. 
+* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the table information including `exists` boolean member. +* `response` will contain information related to this operation. +*/ +TableService.prototype.doesTableExist = function (table, optionsOrCallback, callback) { + this._doesTableExist(table, false, optionsOrCallback, callback); +}; + +/** +* Creates a new table within a storage account. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the new table information. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.createTable = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createTable', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var tableDescriptor = TableResult.serialize(table); + + var webResource = WebResource.post('Tables') + .withHeader(HeaderConstants.PREFER, HeaderConstants.PREFER_NO_CONTENT); + + RequestHandler.setTableRequestHeadersAndBody(webResource, tableDescriptor, this.defaultPayloadFormat); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResponse = {}; + responseObject.tableResponse.isSuccessful = responseObject.error ? false : true; + responseObject.tableResponse.statusCode = responseObject.response === null || responseObject.response === undefined ? undefined : responseObject.response.statusCode; + if (!responseObject.error) { + responseObject.tableResponse.TableName = table; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResponse, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Creates a new table within a storage account if it does not exists. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* `result` will contain the table information including `created` boolean member +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* tableService.createTableIfNotExists('tasktable', function(error) { +* if(!error) { +* // Table created or exists +* } +* }); +*/ +TableService.prototype.createTableIfNotExists = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createTableIfNotExists', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + self._doesTableExist(table, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if (error) { + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createTable(table, options, function(createError, createResult, response) { + if (!createError) { + createResult.created = true; + } + else if (createError && createError.statusCode === Constants.HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.TableErrorCodeStrings.TABLE_ALREADY_EXISTS) { + createError = null; + createResult.created = false; + createResult.isSuccessful = true; + } + callback(createError, createResult, response); + }); + } + }); +}; + +/** +* Deletes a table from a storage account. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +TableService.prototype.deleteTable = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteTable', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.del('Tables(\'' + table + '\')'); + RequestHandler.setTableRequestHeadersAndBody(webResource, null, this.defaultPayloadFormat); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Deletes a table from a storage account, if it exists. +* +* @this {TableService} +* @param {string} table The table name. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* `result` will be `true` if table was deleted, false otherwise +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.deleteTableIfExists = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteTableIfExists', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + self._doesTableExist(table, true, options, function(error, result, response) { + if (error) { + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteTable(table, options, function(deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.StorageErrorCodeStrings.RESOURCE_NOT_FOUND) { + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +// Table Entity Methods + +/** +* Queries data in a table. To retrieve a single entity by partition key and row key, use retrieve entity. +* +* @this {TableService} +* @param {string} table The table name. +* @param {TableQuery} tableQuery The query to perform. Use null, undefined, or new TableQuery() to get all of the entities in the table. +* @param {object} currentToken A continuation token returned by a previous listing operation. +* Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {bool} [options.autoResolveProperties] If true, guess at all property types. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Given a single entity returned by the query, returns a modified object which is added to +* the entities array. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, +* and the property Edm type if given by the service, returns the Edm type of the property. +* @param {TableService~queryResponse} callback `error` will contain information if an error occurs; +* otherwise `entries` will contain the entities returned by the query. +* If more matching entities exist, and could not be returned, +* `queryResultContinuation` will contain a continuation token that can be used +* to retrieve the next set of results. +* `response` will contain information related to this operation. 
+*
+* The logic for returning entity types can get complicated. Here is the algorithm used:
+* ```
+* var propertyType;
+*
+* if (propertyResolver) { // If the caller provides a propertyResolver in the options, use it
+* propertyType = propertyResolver(partitionKey, rowKey, propertyName, propertyValue, propertyTypeFromService);
+* } else if (propertyTypeFromService) { // If the service provides us a property type, use it. See below for an explanation of when this will and won't occur.
+* propertyType = propertyTypeFromService;
+* } else if (autoResolveProperties) { // If options.autoResolveProperties is set to true
+* if (javascript type is string) { // See below for an explanation of how and why autoResolveProperties works as it does.
+* propertyType = 'Edm.String';
+* } else if (javascript type is boolean) {
+* propertyType = 'Edm.Boolean';
+* }
+* }
+*
+* if (propertyType) {
+* // Set the property type on the property.
+* } else {
+* // Property gets no EdmType.
+* }
+* ```
+* Notes:
+*
+* * The service only provides a type if JsonFullMetadata or JsonMinimalMetadata is used, and if the type is Int64, Guid, Binary, or DateTime.
+* * Explanation of autoResolveProperties:
+* * String gets correctly resolved to 'Edm.String'.
+* * Int64, Guid, Binary, and DateTime all get resolved to 'Edm.String.' This only happens if JsonNoMetadata is used (otherwise the service will provide the propertyType in a prior step).
+* * Boolean gets correctly resolved to 'Edm.Boolean'.
+* * For both Int32 and Double, no type information is returned, even in the case of autoResolveProperties = true. This is due to an
+* inability to distinguish between the two in certain cases. 
+* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* // tasktable should already exist and have entities +* +* // returns all entities in tasktable, and a continuation token for the next page of results if necessary +* tableService.queryEntities('tasktable', null, null \/*currentToken*\/, function(error, result) { +* if(!error) { +* var entities = result.entries; +* // do stuff with the returned entities if there are any +* } +* }); +* +* // returns field1 and field2 of the entities in tasktable, and a continuation token for the next page of results if necessary +* var tableQuery = new TableQuery().select('field1', 'field2'); +* tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result) { +* if(!error) { +* var entities = result.entries; +* // do stuff with the returned entities if there are any +* } +* }); +*/ +TableService.prototype.queryEntities = function (table, tableQuery, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('queryEntities', function (v) { + v.string(table, 'table'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.payloadFormat = options.payloadFormat || this.defaultPayloadFormat; + + var webResource = WebResource.get(table); + RequestHandler.setTableRequestHeadersAndBody(webResource, null, options.payloadFormat); + + if (tableQuery) { + var queryString = tableQuery.toQueryObject(); + Object.keys(queryString).forEach(function (queryStringName) { + webResource.withQueryOption(queryStringName, queryString[queryStringName]); + }); + } + + if(!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(TableConstants.NEXT_PARTITION_KEY, currentToken.nextPartitionKey); + webResource.withQueryOption(TableConstants.NEXT_ROW_KEY, currentToken.nextRowKey); + } + 
+ options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.queryEntitiesResult = null; + if (!responseObject.error) { + responseObject.queryEntitiesResult = { + entries: null, + continuationToken: null + }; + + // entries + responseObject.queryEntitiesResult.entries = entityResult.parseQuery(responseObject.response, options.autoResolveProperties, options.propertyResolver, options.entityResolver); + + // continuation token + var continuationToken = { + nextPartitionKey: responseObject.response.headers[TableConstants.CONTINUATION_NEXT_PARTITION_KEY], + nextRowKey: responseObject.response.headers[TableConstants.CONTINUATION_NEXT_ROW_KEY], + targetLocation: responseObject.targetLocation + }; + + if (!azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(continuationToken.nextPartitionKey)) { + responseObject.queryEntitiesResult.continuationToken = continuationToken; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queryEntitiesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Retrieves an entity from a table. +* +* @this {TableService} +* @param {string} table The table name. +* @param {string} partitionKey The partition key. +* @param {string} rowKey The row key. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {string} [options.payloadFormat] The payload format to use for the request.
+* @param {bool} [options.autoResolveProperties] If true, guess at all property types.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value,
+* and the property Edm type if given by the service, returns the Edm type of the property.
+* @param {Function(entity)} [options.entityResolver] The entity resolver. Given the single entity returned by the query, returns a modified object.
+* @param {errorOrResult} callback `error` will contain information if an error occurs;
+* otherwise `result` will be the matching entity.
+* `response` will contain information related to this operation.
+*
+* The logic for returning entity types can get complicated. Here is the algorithm used:
+* ```
+* var propertyType;
+*
+* if (propertyResolver) { // If the caller provides a propertyResolver in the options, use it
+* propertyType = propertyResolver(partitionKey, rowKey, propertyName, propertyValue, propertyTypeFromService);
+* } else if (propertyTypeFromService) { // If the service provides us a property type, use it. See below for an explanation of when this will and won't occur. 
+* propertyType = propertyTypeFromService; +* } else if (autoResolveProperties) { // If options.autoResolveProperties is set to true +* if (javascript type is string) { // See below for an explanation of how and why autoResolveProperties works as it does. +* propertyType = 'Edm.String'; +* } else if (javascript type is boolean) { +* propertyType = 'Edm.Boolean'; +* } +* } +* +* if (propertyType) { +* // Set the property type on the property. +* } else { +* // Property gets no EdmType. +* } +* ``` +* Notes: +* +* * The service only provides a type if JsonFullMetadata or JsonMinimalMetadata is used, and if the type is Int64, Guid, Binary, or DateTime. +* * Explanation of autoResolveProperties: +* * String gets correctly resolved to 'Edm.String'. +* * Int64, Guid, Binary, and DateTime all get resolved to 'Edm.String.' This only happens if JsonNoMetadata is used (otherwise the service will provide the propertyType in a prior step). +* * Boolean gets correctly resolved to 'Edm.Boolean'. +* * For both Int32 and Double, no type information is returned, even in the case of autoResolveProperties = true. This is due to an +* inability to distinguish between the two in certain cases. 
+* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* tableService.retrieveEntity('tasktable', 'tasksSeattle', '1', function(error, serverEntity) { +* if(!error) { +* // Entity available in serverEntity variable +* } +* }); +*/ +TableService.prototype.retrieveEntity = function (table, partitionKey, rowKey, optionsOrCallback, callback) { + var entityDescriptor = { PartitionKey: {_: partitionKey, $: 'Edm.String'}, + RowKey: {_: rowKey, $: 'Edm.String'}, + }; + + validate.validateArgs('retrieveEntity', function (v) { + v.stringAllowEmpty(partitionKey, 'partitionKey'); + v.stringAllowEmpty(rowKey, 'rowKey'); + }); + + this._performEntityOperation(TableConstants.Operations.RETRIEVE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Inserts a new entity into a table. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. +* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. +* @param {string} [options.payloadFormat] The payload format to use in the response, if options.echoContent is true. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Only applied if echoContent is true. Given the partition key, row key, property name, +* property value, and the property Edm type if given by the service, returns the Edm type of the property. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Only applied if echoContent is true. Given the single entity returned by the insert, returns +* a modified object. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* var task1 = { +* PartitionKey : {'_': 'tasksSeattle', '$':'Edm.String'}, +* RowKey: {'_': '1', '$':'Edm.String'}, +* Description: {'_': 'Take out the trash', '$':'Edm.String'}, +* DueDate: {'_': new Date(2011, 12, 14, 12), '$':'Edm.DateTime'} +* }; +* tableService.insertEntity('tasktable', task1, function(error) { +* if(!error) { +* // Entity inserted +* } +* }); +*/ +TableService.prototype.insertEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.INSERT, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Inserts or updates a new entity into a table. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.insertOrReplaceEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.INSERT_OR_REPLACE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Replaces an existing entity within a table. To replace conditionally based on etag, set entity['.metadata']['etag']. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.replaceEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.REPLACE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Updates an existing entity within a table by merging new property values into the entity. To merge conditionally based on etag, set entity['.metadata']['etag']. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information if an error occurs;
+* otherwise `result` will contain the entity information.
+* `response` will contain information related to this operation.
+*/
+TableService.prototype.mergeEntity = function (table, entityDescriptor, optionsOrCallback, callback) {
+ this._performEntityOperation(TableConstants.Operations.MERGE, table, entityDescriptor, optionsOrCallback, callback);
+};
+
+/**
+* Inserts or updates an existing entity within a table by merging new property values into the entity.
+*
+* @this {TableService}
+* @param {string} table The table name.
+* @param {object} entityDescriptor The entity descriptor.
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.insertOrMergeEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.INSERT_OR_MERGE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Deletes an entity within a table. To delete conditionally based on etag, set entity['.metadata']['etag']. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +TableService.prototype.deleteEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.DELETE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Executes the operations in the batch. +* +* @this {TableService} +* @param {string} table The table name. +* @param {TableBatch} batch The table batch to execute. +* @param {object} [options] The create options or callback function. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain responses for each operation executed in the batch; +* `result.entity` will contain the entity information for each operation executed. +* `result.response` will contain the response for each operations executed. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.executeBatch = function (table, batch, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('executeBatch', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.object(batch, 'batch'); + v.callback(callback); + }); + + if(!batch.hasOperations()) { + throw new Error(SR.EMPTY_BATCH); + } + + var options = extend(true, {}, userOptions); + + var batchResult = new BatchResult(this, table, batch.operations); + var webResource = batchResult.constructWebResource(); + + var body = batchResult.serialize(); + webResource.withBody(body); + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body, 'utf8')); + + var processResponseCallback = function (responseObject, next) { + var responseObjects = batchResult.parse(responseObject); + + var noError = true; + // if the batch was unsuccesful, there will be a single response indicating the error + if (responseObjects && responseObjects.length > 0) { + responseObjects.forEach(function(item){ + if(noError && !item.response.isSuccessful){ + responseObject = item; + noError = false; + } + }); + } + + if (noError) { + responseObject.operationResponses = responseObjects; + } + + var finalCallback = function (returnObject) { + // perform final callback + callback(returnObject.error, returnObject.operationResponses, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +// Private methods + +/** +* Checks whether or not a table exists on the service. +* @ignore +* +* @this {TableService} +* @param {string} table The table name. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information if an error occurs; +* otherwise `result` will contain +* the table information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype._doesTableExist = function (table, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesTableExist', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var webResource = WebResource.get('Tables(\'' + table + '\')'); + webResource.withHeader(HeaderConstants.ACCEPT, this.defaultPayloadFormat); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResult = {}; + responseObject.tableResult.isSuccessful = responseObject.error ? false : true; + responseObject.tableResult.statusCode = responseObject.response === null || responseObject.response === undefined ? undefined : responseObject.response.statusCode; + responseObject.tableResult.TableName = table; + + if(!responseObject.error){ + responseObject.tableResult.exists = true; + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.tableResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Performs a table operation. +* +* @this {TableService} +* @param {string} operation The operation to perform. +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The create options or callback function. 
+* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `entity` will contain the entity information. +* `response` will contain information related to this operation. 
+* @ignore +*/ +TableService.prototype._performEntityOperation = function (operation, table, entityDescriptor, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('entityOperation', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.object(entityDescriptor, 'entityDescriptor'); + + if(typeof entityDescriptor.PartitionKey !== 'string') { + v.object(entityDescriptor.PartitionKey, 'entityDescriptor.PartitionKey'); + v.stringAllowEmpty(entityDescriptor.PartitionKey._, 'entityDescriptor.PartitionKey._'); + } + + if(typeof entityDescriptor.RowKey !== 'string') { + v.object(entityDescriptor.RowKey, 'entityDescriptor.RowKey'); + v.stringAllowEmpty(entityDescriptor.RowKey._, 'entityDescriptor.RowKey._'); + } + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.payloadFormat = options.payloadFormat || this.defaultPayloadFormat; + + var webResource = RequestHandler.constructEntityWebResource(operation, table, entityDescriptor, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback; + if (operation === TableConstants.Operations.DELETE) { + finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + } else { + responseObject.entityResponse = null; + if (!responseObject.error) { + responseObject.entityResponse = entityResult.parseEntity(responseObject.response, options.autoResolveProperties, options.propertyResolver, options.entityResolver); + } + + finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.entityResponse, returnObject.response); + }; + } + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieves a table URL. +* +* @param {string} table The table name. 
+* @param {string} [sasToken] The Shared Access Signature token. +* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. +* @return {string} The formatted URL string. +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* var sharedAccessPolicy = { +* AccessPolicy: { +* Permissions: azure.TableUtilities.SharedAccessPermissions.QUERY, +* Start: startDate, +* Expiry: expiryDate +* }, +* }; +* +* var sasToken = tableService.generateSharedAccessSignature(table, sharedAccessPolicy); +* var sasUrl = tableService.getUrl(table, sasToken); +*/ +TableService.prototype.getUrl = function (table, sasToken, primary) { + validate.validateArgs('getUrl', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + }); + + return this._getUrl(table, sasToken, primary); +}; + +/** +* Given the partition key, row key, property name, property value, +* and the property Edm type if given by the service, returns the Edm type of the property. +* @typedef {function} TableService~propertyResolver +* @param {object} pk The partition key. +* @param {object} rk The row key. +* @param {string} name The property name. +* @param {object} value The property value. +* @param {string} type The EDM type. +*/ + +/** +* Returns entities matched by a query. +* @callback TableService~queryResponse +* @param {object} error If an error occurs, the error information. +* @param {object} entries The entities returned by the query. +* @param {object} queryResultContinuation If more matching entities exist, and could not be returned, +* a continuation token that can be used to retrieve more results. +* @param {object} response Information related to this operation. 
+*/ + +module.exports = TableService; diff --git a/src/node_modules/azure-storage/lib/services/table/tableutilities.js b/src/node_modules/azure-storage/lib/services/table/tableutilities.js new file mode 100644 index 0000000..ecf7e24 --- /dev/null +++ b/src/node_modules/azure-storage/lib/services/table/tableutilities.js @@ -0,0 +1,154 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'HeaderConstants'. +exports = module.exports; + +/** +* Defines constants, enums, and utility functions for use with the Table service. +* @namespace TableUtilities +*/ +var TableUtilities = { + /** + * Permission types. + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + QUERY: 'r', + ADD: 'a', + UPDATE: 'u', + DELETE: 'd' + }, + + /** + * Payload Format. + * + * @const + * @enum {string} + */ + PayloadFormat: { + FULL_METADATA: 'application/json;odata=fullmetadata', + MINIMAL_METADATA: 'application/json;odata=minimalmetadata', + NO_METADATA: 'application/json;odata=nometadata' + }, + + /** + * Defines the set of Boolean operators for constructing queries. + * + * @const + * @enum {string} + */ + TableOperators: { + AND: 'and', + NOT: 'not', + OR: 'or' + }, + + /** + * Filter property comparison operators. 
+ * + * @const + * @enum {string} + */ + QueryComparisons: { + EQUAL: 'eq', + NOT_EQUAL: 'ne', + GREATER_THAN: 'gt', + GREATER_THAN_OR_EQUAL: 'ge', + LESS_THAN: 'lt', + LESS_THAN_OR_EQUAL: 'le' + }, + + /** + * Edm types. + * + * @const + * @enum {string} + */ + EdmType: { + STRING: 'Edm.String', + BINARY: 'Edm.Binary', + INT64: 'Edm.Int64', + INT32: 'Edm.Int32', + DOUBLE: 'Edm.Double', + DATETIME: 'Edm.DateTime', + GUID: 'Edm.Guid', + BOOLEAN: 'Edm.Boolean' + }, + + /** + * A helper to create table entities. + * + * @example + * var entGen = TableUtilities.entityGenerator; + * var entity = { PartitionKey: entGen.String('part2'), + * RowKey: entGen.String('row1'), + * boolValue: entGen.Boolean(true), + * intValue: entGen.Int32(42), + * dateValue: entGen.DateTime(new Date(Date.UTC(2011, 10, 25))), + * }; + */ + entityGenerator: (function() + { + var EntityProperty = function (value, type) { + var entityProperty = { _:value}; + if (type) { + entityProperty['$'] = type; + } + return entityProperty; + }; + + return { + EntityProperty : EntityProperty, + + Int32 : function(value) { + return new EntityProperty(value, 'Edm.Int32'); + }, + + Int64 : function(value) { + return new EntityProperty(value, 'Edm.Int64'); + }, + + Binary : function(value) { + return new EntityProperty(value, 'Edm.Binary'); + }, + + Boolean : function(value) { + return new EntityProperty(value, 'Edm.Boolean'); + }, + + String : function(value) { + return new EntityProperty(value, 'Edm.String'); + }, + + Guid : function(value) { + return new EntityProperty(value, 'Edm.Guid'); + }, + + Double : function(value) { + return new EntityProperty(value, 'Edm.Double'); + }, + + DateTime : function(value) { + return new EntityProperty(value, 'Edm.DateTime'); + } + }; + })() +}; + +module.exports = TableUtilities; \ No newline at end of file diff --git a/src/node_modules/azure-storage/package.json b/src/node_modules/azure-storage/package.json new file mode 100644 index 0000000..edf832e --- /dev/null +++ 
b/src/node_modules/azure-storage/package.json @@ -0,0 +1,106 @@ +{ + "_args": [ + [ + "azure-storage@2.10.3", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "azure-storage@2.10.3", + "_id": "azure-storage@2.10.3", + "_inBundle": false, + "_integrity": "sha512-IGLs5Xj6kO8Ii90KerQrrwuJKexLgSwYC4oLWmc11mzKe7Jt2E5IVg+ZQ8K53YWZACtVTMBNO3iGuA+4ipjJxQ==", + "_location": "/azure-storage", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "azure-storage@2.10.3", + "name": "azure-storage", + "escapedName": "azure-storage", + "rawSpec": "2.10.3", + "saveSpec": null, + "fetchSpec": "2.10.3" + }, + "_requiredBy": [ + "/" + ], + "_resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.3.tgz", + "_spec": "2.10.3", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Microsoft Corporation" + }, + "bugs": { + "url": "http://github.com/Azure/azure-storage-node/issues" + }, + "dependencies": { + "browserify-mime": "~1.2.9", + "extend": "^3.0.2", + "json-edm-parser": "0.1.2", + "md5.js": "1.3.4", + "readable-stream": "~2.0.0", + "request": "^2.86.0", + "underscore": "~1.8.3", + "uuid": "^3.0.0", + "validator": "~9.4.1", + "xml2js": "0.2.8", + "xmlbuilder": "^9.0.7" + }, + "description": "Microsoft Azure Storage Client Library for Node.js", + "devDependencies": { + "batchflow": "0.4.0", + "browserify": "^16.1.1", + "coveralls": "^3.0.3", + "factor-bundle": "^2.5.0", + "grunt": "^1.0.4", + "grunt-jsdoc": "^2.3.0", + "ink-docstrap": "^1.3.0", + "istanbul": "^0.4.5", + "jshint": ">= 2.1.4", + "karma": "^4.0.1", + "karma-chrome-launcher": "^2.2.0", + "karma-edge-launcher": "^0.4.2", + "karma-env-preprocessor": "^0.1.1", + "karma-firefox-launcher": "^1.1.0", + "karma-ie-launcher": "^1.0.0", + "karma-mocha": "^1.3.0", + "karma-mocha-reporter": "^2.2.5", + "mocha": ">= 1.18.0", + 
"mocha-lcov-reporter": "^1.0.0", + "nock": "0.16", + "should": "1.2.x", + "uglify-js": "~3.3.9", + "watchify": "^3.11.0" + }, + "engines": { + "node": ">= 0.8.26" + }, + "homepage": "http://github.com/Azure/azure-storage-node", + "keywords": [ + "node", + "azure", + "storage" + ], + "license": "Apache-2.0", + "main": "./lib/azure-storage.js", + "name": "azure-storage", + "repository": { + "type": "git", + "url": "git+ssh://git@github.com/Azure/azure-storage-node.git" + }, + "scripts": { + "check": "jshint lib && npm set audit-level high && npm audit", + "cover": "istanbul cover ./node_modules/mocha/bin/_mocha -- -R spec -u bdd --no-timeouts --recursive test", + "coveralls": "npm run cover && cat ./coverage/lcov.info | node ./node_modules/coveralls/bin/coveralls.js", + "genjs": "node ./browser/bundle.js", + "jstest": "npm run genjs && node ./browser/test/bundle.js && karma start --single-run", + "test": "mocha --no-timeouts --recursive test" + }, + "tags": [ + "azure", + "storage", + "sdk" + ], + "typings": "typings/azure-storage/azure-storage.d.ts", + "version": "2.10.3" +} diff --git a/src/node_modules/azure-storage/tsconfig.json b/src/node_modules/azure-storage/tsconfig.json new file mode 100644 index 0000000..a0e5d93 --- /dev/null +++ b/src/node_modules/azure-storage/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "es6", + "sourceMap": true, + "outDir": "built", + "noEmitOnError": true, + "module": "commonjs" + }, + "exclude": [ + "node_modules", + "built", + "docs" + ] +} \ No newline at end of file diff --git a/src/node_modules/azure-storage/typings.json b/src/node_modules/azure-storage/typings.json new file mode 100644 index 0000000..bbce788 --- /dev/null +++ b/src/node_modules/azure-storage/typings.json @@ -0,0 +1,6 @@ +{ + "globalDependencies": { + "node": "registry:env/node#0.10.0+20160918225031", + "node-uuid": "registry:dt/node-uuid#0.0.0+20160316155526" + } +} diff --git 
a/src/node_modules/azure-storage/typings/azure-storage/azure-storage.d.ts b/src/node_modules/azure-storage/typings/azure-storage/azure-storage.d.ts new file mode 100644 index 0000000..ffd9706 --- /dev/null +++ b/src/node_modules/azure-storage/typings/azure-storage/azure-storage.d.ts @@ -0,0 +1,9750 @@ +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +import * as events from 'events'; +import * as url from 'url'; +import * as stream from 'stream'; + +interface Map<T> { + [index: string]: T; +} + +interface SharedKeyGenerateSignatureArgs { + /** The resource type, if the resource is a blob or container. Null if the resource is a queue or table. */ + resourceType?: string; + /** The table name, if the resource is a table. Null if the resource is a blob or queue. */ + tableName?: string; + /** The optional header values to set for a blob returned with this SAS. */ + headers?: { + /** The value of the Cache-Control response header to be returned when this SAS is used. */ + CacheControl?: string; + /** The value of the Content-Type response header to be returned when this SAS is used. */ + ContentType?: string; + /** The value of the Content-Encoding response header to be returned when this SAS is used. */ + ContentEncoding?: string; + /** The value of the Content-Language response header to be returned when this SAS is used. 
*/ + ContentLanguage?: string; + /** The value of the Content-Disposition response header to be returned when this SAS is used. */ + ContentDisposition: string; + }; +} + +interface SharedKeyGenerateQueryStringArgs extends SharedKeyGenerateSignatureArgs { + /** The query string, if additional parameters are desired. */ + queryString?: string; +} + +declare module azurestorage { + export interface StorageHost { + primaryHost: string; + secondaryHost?: string; + } + + module services { + module blob { + // ########################### + // ./services/blob/blobservice + // ########################### + module blobservice { + export class BlobService extends StorageServiceClient { + defaultEnableReuseSocket: boolean; + singleBlobPutThresholdInBytes: number; + parallelOperationThreadCount: number; + + /** + * Creates a new BlobService object. + * If no connection string or storageaccount and storageaccesskey are provided, + * the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. + * @class + * The BlobService class is used to perform operations on the Microsoft Azure Blob Service. + * The Blob Service provides storage for binary large objects, and provides + * functions for working with data stored in blobs as either streams or pages of data. + * + * For more information on the Blob Service, as well as task focused information on using it in a Node.js application, see + * [How to Use the Blob Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-blob-storage/). + * The following defaults can be set on the blob service. + * singleBlobPutThresholdInBytes The default maximum size, in bytes, of a blob before it must be separated into blocks. + * defaultEnableReuseSocket The default boolean value to enable socket reuse when uploading local files or streams. + * If the Node.js version is lower than 0.10.x, socket reuse will always be turned off. 
+ * defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Blob service. + * defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Blob service. + * defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Blob service. + * defaultLocationMode The default location mode for requests made via the Blob service. + * parallelOperationThreadCount The number of parallel operations that may be performed when uploading a blob that is greater than + * the value specified by the singleBlobPutThresholdInBytes property in size. + * useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Blob service; true to use the + * Nagle algorithm; otherwise, false. The default value is false. + * enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use + * http(s).Agent({keepAlive:true}). + * @constructor + * @extends {StorageServiceClient} + * + * @param {string} [storageAccountOrConnectionString] The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} [sasToken] The Shared Access Signature token. + * @param {string} [endpointSuffix] The endpoint suffix. + */ + constructor(storageAccountOrConnectionString: string, storageAccessKey?: string, host?: string|StorageHost, sasToken?: string, endpointSuffix?: string); + + /** + * Associate a filtering operation with this BlobService. Filtering operations + * can include logging, automatically retrying, etc. 
Filter operations are objects + * that implement a method with the signature: + * + * "function handle (requestOptions, next)". + * + * After doing its preprocessing on the request options, the method needs to call + * "next" passing a callback with the following signature: + * signature: + * + * "function (returnObject, finalCallback, next)" + * + * In this callback, and after processing the returnObject (the response from the + * request to the server), the callback needs to either invoke next if it exists to + * continue processing other filters or simply invoke finalCallback otherwise to end + * up the service invocation. + * + * @function BlobService#withFilter + * @param {Object} filter The new filter object. + * @return {BlobService} A new service client with the filter applied. + */ + withFilter(newFilter: common.filters.IFilter): BlobService; + + /** + * Gets the service stats for a storage account’s Blob service. + * + * @this {BlobService} + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+ * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the stats and + * `response` will contain information related to this operation. + */ + getServiceStats(options: common.RequestOptions, callback: ErrorOrResult): void; + getServiceStats(callback: ErrorOrResult): void; + + /** + * Gets the properties of a storage account’s Blob service, including Azure Storage Analytics. + * + * @this {BlobService} + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the properties + * and `response` will contain information related to this operation. + */ + getServiceProperties(options: common.RequestOptions, callback?: ErrorOrResult): void; + getServiceProperties(callback?: ErrorOrResult): void; + + /** + * Gets the properties of a storage account. 
+ * + * @this {BlobService} + * @param {string} [container] Optional. Name of an existing container. Required when using a SAS token to a specific container or blob. + * @param {string} [blob] Optional. Name of an existing blob. Required when using a SAS token to a specific blob. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link AccountProperties}` will contain the properties + * and `response` will contain information related to this operation. 
+ */ + getAccountProperties(container?:string, blob?:string, options?: common.RequestOptions, callback?: ErrorOrResult): void; + getAccountProperties(container?:string, blob?:string, callback?: ErrorOrResult): void; + + /** + * Sets the properties of a storage account’s Blob service, including Azure Storage Analytics. + * You can also use this operation to set the default request version for all incoming requests that do not have a version specified. + * When you set blob service properties (such as enabling soft delete), it may take up to 30 seconds to take effect. + * + * @this {BlobService} + * @param {Object} serviceProperties The service properties. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise, `response` + * will contain information related to this operation. 
+ */ + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.BlobServiceProperties, options: common.RequestOptions, callback: ErrorOrResponse): void; + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.BlobServiceProperties, callback: ErrorOrResponse): void; + + /** + * Sets the tier of a blockblob under a blob storage LRS account, or the tier of a pageblob under a premium storage account. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} blobTier Please see BlobUtilities.BlobTier.StandardBlobTier or BlobUtilities.BlobTier.PremiumPageBlobTier for possible values. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise, `response` + * will contain information related to this operation. + */ + setBlobTier(container: string, blob: string, blobTier: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + setBlobTier(container: string, blob: string, blobTier: string, callback: ErrorOrResponse): void; + + /** + * Lists a segment containing a collection of container items under the specified account. + * + * @this {BlobService} + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.maxResults] Specifies the maximum number of containers to return per call to Azure storage. + * @param {string} [options.include] Include this parameter to specify that the container's metadata be returned as part of the response body. (allowed values: '', 'metadata') + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of containers and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listContainersSegmented(currentToken: common.ContinuationToken, options: BlobService.ListContainerOptions, callback: ErrorOrResult): void; + listContainersSegmented(currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + /** + * Lists a segment containing a collection of container items whose names begin with the specified prefix under the specified account. + * + * @this {BlobService} + * @param {string} prefix The prefix of the container name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.maxResults] Specifies the maximum number of containers to return per call to Azure storage. + * @param {string} [options.include] Include this parameter to specify that the container's metadata be returned as part of the response body. (allowed values: '', 'metadata') + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of containers and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listContainersSegmentedWithPrefix(prefix: string, currentToken: common.ContinuationToken, options: BlobService.ListContainerOptions, callback: ErrorOrResult): void; + listContainersSegmentedWithPrefix(prefix: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Checks whether or not a container exists on the service. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. 
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the container exists, or false if the container does not exist. + * `response` will contain information related to this operation. + */ + doesContainerExist(container: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + + /** + * Checks whether or not a container exists on the service. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the container exists, or false if the container does not exist. + * `response` will contain information related to this operation. + */ + doesContainerExist(container: string, callback: ErrorOrResult): void; + + /** + * Creates a new container under the specified account. + * If a container with the same name already exists, the operation fails. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the container information. + * `response` will contain information related to this operation. + */ + createContainer(container: string, callback: ErrorOrResult): void; + + + /** + * Creates a new container under the specified account. + * If a container with the same name already exists, the operation fails. 
+ * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the container information. + * `response` will contain information related to this operation. + */ + createContainer(container: string, options: BlobService.CreateContainerOptions, callback: ErrorOrResult): void; + + /** + * Creates a new container under the specified account if the container does not exists. + * + * @this {BlobService} + * @param {string} container The container name. 
+ * @param {errorOrResult} callback             `error` will contain information
+ * if an error occurs; otherwise `result` will
+ * be true if the container was created, or false if the container
+ * already exists.
+ * `response` will contain information related to this operation.
+ *
+ * @example
+ * var azure = require('azure-storage');
+ * var blobService = azure.createBlobService();
+ * blobService.createContainerIfNotExists('taskcontainer', {publicAccessLevel : 'blob'}, function(error) {
+ * if(!error) {
+ * // Container created or exists, and is public
+ * }
+ * });
+ */
+ createContainerIfNotExists(container: string, callback: ErrorOrResult): void;
+
+ /**
+ * Creates a new container under the specified account if the container does not exist.
+ *
+ * @this {BlobService}
+ * @param {string} container                    The container name.
+ * @param {Object} [options]                    The request options.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {Object} [options.metadata]           The metadata key/value pairs.
+ * @param {string} [options.publicAccessLevel]  Specifies whether data in the container may be accessed publicly and the level of access.
+ * @param {int} [options.timeoutIntervalInMs]   The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the container was created, or false if the container + * already exists. + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * blobService.createContainerIfNotExists('taskcontainer', {publicAccessLevel : 'blob'}, function(error) { + * if(!error) { + * // Container created or exists, and is public + * } + * }); + */ + createContainerIfNotExists(container: string, options: BlobService.CreateContainerOptions, callback: ErrorOrResult): void; + + /** + * Retrieves a container and its properties from a specified account. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. + * `response` will contain information related to this operation. + */ + getContainerProperties(container: string, callback: ErrorOrResult): void; + + /** + * Retrieves a container and its properties from a specified account. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {string} [options.leaseId] The container lease identifier. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. + * `response` will contain information related to this operation. + */ + getContainerProperties(container: string, options: BlobService.ContainerOptions, callback: ErrorOrResult): void; + + /** + * Returns all user-defined metadata for the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. + * `response` will contain information related to this operation. + */ + getContainerMetadata(container: string, callback: ErrorOrResult): void; + + /** + * Returns all user-defined metadata for the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The container lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. + * `response` will contain information related to this operation. + */ + getContainerMetadata(container: string, options: BlobService.ContainerOptions, callback: ErrorOrResult): void; + + /** + * Sets the container's metadata. + * + * Calling the Set Container Metadata operation overwrites all existing metadata that is associated with the container. + * It's not possible to modify an individual name/value pair. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} metadata The metadata key/value pairs. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + setContainerMetadata(container: string, metadata: Map, callback: ErrorOrResult): void; + + /** + * Sets the container's metadata. + * + * Calling the Set Container Metadata operation overwrites all existing metadata that is associated with the container. 
+ * It's not possible to modify an individual name/value pair. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} metadata The metadata key/value pairs. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The container lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + setContainerMetadata(container: string, metadata: Map, options: BlobService.ContainerOptions, callback: ErrorOrResult): void; + + /** + * Gets the container's ACL. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. 
+ * `response` will contain information related to this operation. + */ + getContainerAcl(container: string, callback: ErrorOrResult): void; + + /** + * Gets the container's ACL. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The container lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. + * `response` will contain information related to this operation. + */ + getContainerAcl(container: string, options: BlobService.ContainerOptions, callback: ErrorOrResult): void; + + /** + * Updates the container's ACL. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {[key:string]: AccessPolicy} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. 
+ * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access. + * @param {string} [options.leaseId] The container lease identifier. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the container. + * `response` will contain information related to this operation. + */ + setContainerAcl(container: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, options: BlobService.ContainerAclOptions, callback: ErrorOrResult): void; + setContainerAcl(container: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, callback: ErrorOrResult): void; + + /** + * Marks the specified container for deletion. + * The container and any blobs contained within it are later deleted during garbage collection. + * + * @this {BlobService} + * @param {string} container The container name. 
+ * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + deleteContainer(container: string, callback: ErrorOrResponse): void; + + /** + * Marks the specified container for deletion. + * The container and any blobs contained within it are later deleted during garbage collection. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {string} [options.leaseId] The container lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. 
+ */ + deleteContainer(container: string, options: BlobService.ContainerOptions, callback: ErrorOrResponse): void; + + /** + * Marks the specified container for deletion if it exists. + * The container and any blobs contained within it are later deleted during garbage collection. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the container exists and was deleted, or false if the container + * did not exist. + * `response` will contain information related to this operation. + */ + deleteContainerIfExists(container: string, callback: ErrorOrResult): void; + + /** + * Marks the specified container for deletion if it exists. + * The container and any blobs contained within it are later deleted during garbage collection. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {string} [options.leaseId] The container lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the container exists and was deleted, or false if the container + * did not exist. + * `response` will contain information related to this operation. + */ + deleteContainerIfExists(container: string, options: BlobService.ContainerOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob directory items in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of `[directories]{@link BlobDirectoryResult}` and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobDirectoriesSegmented(container: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob directory items in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {object} [options] The request options. + * @param {int} [options.maxResults] Specifies the maximum number of directories to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. 
(maximum: 5000) + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of `[directories]{@link BlobDirectoryResult}` and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobDirectoriesSegmented(container: string, currentToken: common.ContinuationToken, options: BlobService.ListBlobPrefixesSegmentedRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob directory items whose names begin with the specified prefix in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} prefix The prefix of the blob name. 
+ * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of `[directories]{@link BlobDirectoryResult}` and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobDirectoriesSegmentedWithPrefix(container: string, prefix: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob directory items whose names begin with the specified prefix in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} prefix The prefix of the blob directory. + * @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {object} [options] The request options. + * @param {int} [options.maxResults] Specifies the maximum number of directories to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of `[directories]{@link BlobDirectoryResult}` and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobDirectoriesSegmentedWithPrefix(container: string, prefix: string, currentToken: common.ContinuationToken, options: BlobService.ListBlobPrefixesSegmentedRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob items in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of blobs and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobsSegmented(container: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob items in the container. 
+ * + * @this {BlobService} + * @param {string} container The container name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {string} [options.delimiter] Delimiter, i.e. '/', for specifying folder hierarchy. + * @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) + * @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted'). Multiple values can be added separated with a comma (,) + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. 
+ * `entries` gives a list of blobs and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobsSegmented(container: string, currentToken: common.ContinuationToken, options: BlobService.ListBlobsSegmentedRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob items whose names begin with the specified prefix in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} prefix The prefix of the blob name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the entries of blobs and the continuation token for the next listing operation. + * `response` will contain information related to this operation. + */ + listBlobsSegmentedWithPrefix(container: string, prefix: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of blob items whose names begin with the specified prefix in the container. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} prefix The prefix of the blob name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {string} [options.delimiter] Delimiter, i.e. '/', for specifying folder hierarchy. + * @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. 
(maximum: 5000)
+ * @param {string} [options.include]            Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted'. Multiple values can be added separated with a comma (,)
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs]   The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback              `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the entries of blobs and the continuation token for the next listing operation.
+ * `response` will contain information related to this operation.
+ */
+ listBlobsSegmentedWithPrefix(container: string, prefix: string, currentToken: common.ContinuationToken, options: BlobService.ListBlobsSegmentedRequestOptions, callback: ErrorOrResult): void;
+
+ /**
+ * Acquires a new lease. If container and blob are specified, acquires a blob lease. Otherwise, if only container is specified and blob is null, acquires a container lease.
+ *
+ * @this {BlobService}
+ * @param {string} container The container name.
+ * @param {string} blob The blob name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + acquireLease(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Acquires a new lease. If container and blob are specified, acquires a blob lease. Otherwise, if only container is specified and blob is null, acquires a container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.leaseDuration] The lease duration in seconds. A non-infinite lease can be between 15 and 60 seconds. Default is never to expire. + * @param {string} [options.proposedLeaseId] The proposed lease identifier. Must be a GUID. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+ * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + acquireLease(container: string, blob: string, options: BlobService.AcquireLeaseRequestOptions, callback: ErrorOrResult): void; + + /** + * Renews an existing lease. If container and blob are specified, renews the blob lease. Otherwise, if only container is specified and blob is null, renews the container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} leaseId The lease identifier. Must be a GUID. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + renewLease(container: string, blob: string, leaseId: string, callback: ErrorOrResult): void; + + /** + * Renews an existing lease. If container and blob are specified, renews the blob lease. Otherwise, if only container is specified and blob is null, renews the container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} leaseId The lease identifier. Must be a GUID. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + renewLease(container: string, blob: string, leaseId: string, options: BlobService.LeaseRequestOptions, callback: ErrorOrResult): void; + + /** + * Changes the lease ID of an active lease. If container and blob are specified, changes the blob lease. Otherwise, if only container is specified and blob is null, changes the + * container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} leaseId The current lease identifier. + * @param {string} proposedLeaseId The proposed lease identifier. Must be a GUID. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the lease information. + * `response` will contain information related to this operation. + */ + changeLease(container: string, blob: string, leaseId: string, proposedLeaseId: string, callback: ErrorOrResult): void; + + /** + * Changes the lease ID of an active lease. If container and blob are specified, changes the blob lease. 
Otherwise, if only container is specified and blob is null, changes the + * container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} leaseId The current lease identifier. + * @param {string} proposedLeaseId The proposed lease identifier. Must be a GUID. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the lease information. + * `response` will contain information related to this operation. + */ + changeLease(container: string, blob: string, leaseId: string, proposedLeaseId: string, options: BlobService.LeaseRequestOptions, callback: ErrorOrResult): void; + + /** + * Releases the lease. If container and blob are specified, releases the blob lease. 
Otherwise, if only container is specified and blob is null, releases the container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} leaseId The lease identifier. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + releaseLease(container: string, blob: string, leaseId: string, callback: ErrorOrResult): void; + + /** + * Releases the lease. If container and blob are specified, releases the blob lease. Otherwise, if only container is specified and blob is null, releases the container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} leaseId The lease identifier. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + releaseLease(container: string, blob: string, leaseId: string, options: BlobService.LeaseRequestOptions, callback: ErrorOrResult): void; + + /** + * Breaks the lease but ensures that another client cannot acquire a new lease until the current lease period has expired. If container and blob are specified, breaks the blob lease. + * Otherwise, if only container is specified and blob is null, breaks the container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + breakLease(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Breaks the lease but ensures that another client cannot acquire a new lease until the current lease period has expired. If container and blob are specified, breaks the blob lease. + * Otherwise, if only container is specified and blob is null, breaks the container lease. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {int} [options.leaseBreakPeriod] The lease break period, between 0 and 60 seconds. If unspecified, a fixed-duration lease breaks after + * the remaining lease period elapses, and an infinite lease breaks immediately. 
+ * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the lease information. + * `response` will contain information related to this operation. + */ + breakLease(container: string, blob: string, options: BlobService.BreakLeaseRequestOptions, callback: ErrorOrResult): void; + + /** + * Returns all user-defined metadata, standard HTTP properties, and system properties for the blob. + * It does not return or modify the content of the blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. 
+ */ + getBlobProperties(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Returns all user-defined metadata, standard HTTP properties, and system properties for the blob. + * It does not return or modify the content of the blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. 
+ */
+ getBlobProperties(container: string, blob: string, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void;
+
+ /**
+ * Returns all user-defined metadata for the specified blob or snapshot.
+ * It does not modify or return the content of the blob.
+ *
+ * @this {BlobService}
+ * @param {string} container The container name.
+ * @param {string} blob The blob name.
+ * @param {Object} [options] The request options.
+ * @param {string} [options.snapshotId] The snapshot identifier.
+ * @param {string} [options.leaseId] The lease identifier.
+ * @param {AccessConditions} [options.accessConditions] The access conditions.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * information about the blob.
+ * `response` will contain information related to this operation.
+ */ + getBlobMetadata(container: string, blob: string, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + getBlobMetadata(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Clears user-defined properties for the specified blob or snapshot. + * It does not modify or return the content of the blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + */ + setBlobProperties(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Sets user-defined properties for the specified blob or snapshot. + * It does not modify or return the content of the blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentMD5] The blob's MD5 hash. + * @param {string} [options.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentDisposition] The blob's content disposition. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {AccessConditions} [options.accessConditions] The access conditions.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * information about the blob.
+ * `response` will contain information related to this operation.
+ */
+ setBlobProperties(container: string, blob: string, options: BlobService.SetBlobPropertiesRequestOptions, callback: ErrorOrResult): void;
+
+ /**
+ * Sets user-defined metadata for the specified blob or snapshot as one or more name-value pairs
+ * It does not modify or return the content of the blob.
+ *
+ * @this {BlobService}
+ * @param {string} container The container name.
+ * @param {string} blob The blob name.
+ * @param {Object} metadata The metadata key/value pairs.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * information on the blob.
+ * `response` will contain information related to this operation.
+ */ + setBlobMetadata(container: string, blob: string, metadata: Map, callback: ErrorOrResult): void; + + /** + * Sets user-defined metadata for the specified blob or snapshot as one or more name-value pairs + * It does not modify or return the content of the blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} metadata The metadata key/value pairs. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information on the blob. + * `response` will contain information related to this operation. 
+ */ + setBlobMetadata(container: string, blob: string, metadata: Map, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + + /** + * Downloads a blob into a file. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} localFileName The local path to the file to be downloaded. + * @param {Object} [options] The request options. + * @param {SpeedSummary} [options.speedSummary] The upload tracker objects. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. + * @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. 
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the blob information. + * `response` will contain information related to this operation. + * @return {SpeedSummary} + * + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * blobService.getBlobToLocalFile('taskcontainer', 'task1', 'task1-download.txt', function(error, serverBlob) { + * if(!error) { + * // Blob available in serverBlob.blob variable + * } + */ + getBlobToLocalFile(container: string, blob: string, localFileName: string, options: BlobService.GetBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + getBlobToLocalFile(container: string, blob: string, localFileName: string, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + /** + * Provides a stream to read from a blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. + * @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. 
+ * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the blob information. + * `response` will contain information related to this operation. + * @return {Readable} A Node.js Readable stream. + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * var writable = fs.createWriteStream(destinationFileNameTarget); + * blobService.createReadStream(containerName, blobName).pipe(writable); + */ + createReadStream(container: string, blob: string, options: BlobService.GetBlobRequestOptions, callback: ErrorOrResult): stream.Readable; + createReadStream(container: string, blob: string, callback: ErrorOrResult): stream.Readable; + + /** + * Downloads a blob into a stream. + * + * @this {BlobService} + * @param {string} container The container name. 
+ * @param {string} blob The blob name. + * @param {Writable} writeStream The Node.js Writable stream. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the blob information. + * `response` will contain information related to this operation. + * @return {SpeedSummary} + * + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * blobService.getBlobToStream('taskcontainer', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverBlob) { + * if(!error) { + * // Blob available in serverBlob.blob variable + * } + * }); + */ + getBlobToStream(container: string, blob: string, writeStream: stream.Writable, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Downloads a blob into a stream. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Writable} writeStream The Node.js Writable stream. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. + * @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. + * @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the blob information. + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * blobService.getBlobToStream('taskcontainer', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverBlob) { + * if(!error) { + * // Blob available in serverBlob.blob variable + * } + * }); + */ + getBlobToStream(container: string, blob: string, writeStream: stream.Writable, options: BlobService.GetBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Downloads a blob into a text string. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. 
+ * @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {BlobService~blobToText} callback `error` will contain information + * if an error occurs; otherwise `text` will contain the blob contents, + * and `blockBlob` will contain + * the blob information. + * `response` will contain information related to this operation. + */ + getBlobToText(container: string, blob: string, options: BlobService.GetBlobRequestOptions, callback: BlobService.BlobToText): void; + getBlobToText(container: string, blob: string, callback: BlobService.BlobToText): void; + + /** + * Marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. 
+ * If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, + * or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error + * and nothing will be deleted. + * If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; `response` will contain information related to this operation. + */ + deleteBlob(container: string, blob: string, callback: ErrorOrResponse): void; + + /** + * Marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. + * If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, + * or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error + * and nothing will be deleted. + * If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.BlobUtilities.SnapshotDeleteOptions.*. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; `response` will contain information related to this operation. + */ + deleteBlob(container: string, blob: string, options: BlobService.DeleteBlobRequestOptions, callback: ErrorOrResponse): void; + + /** + * The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot. + * Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; `response` will contain information related to this operation. + */ + undeleteBlob(container: string, blob: string, callback: ErrorOrResponse): void; + undeleteBlob(container: string, blob: string, options: BlobService.ConditionalRequestOption, callback: ErrorOrResponse): void; + + /** + * Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted during garbage collection. + * If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, + * or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error + * and nothing will be deleted. + * If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. + * + * @this {BlobService} + * @param {string} container The container name. 
+ * @param {string} blob The blob name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the blob was deleted, or false if the blob + * does not exist. + * `response` will contain information related to this operation. + */ + deleteBlobIfExists(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted during garbage collection. + * If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, + * or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error + * and nothing will be deleted. + * If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.BlobUtilities.SnapshotDeleteOptions.*. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the blob was deleted, or false if the blob + * does not exist. + * `response` will contain information related to this operation. + */ + deleteBlobIfExists(container: string, blob: string, options: BlobService.DeleteBlobRequestOptions, callback: ErrorOrResult): void; + + /** + * Checks whether or not a blob exists on the service. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `errorOrResult` will + * be true if the blob exists, or false if the blob does not exist. + * `response` will contain information related to this operation. + */ + doesBlobExist(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Checks whether or not a blob exists on the service. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The lease identifier. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `errorOrResult` will + * be true if the blob exists, or false if the blob does not exist. + * `response` will contain information related to this operation. + */ + doesBlobExist(container: string, blob: string, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + + /** + * Creates a read-only snapshot of a blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {string} [options.leaseId] The lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the ID of the snapshot. + * `response` will contain information related to this operation. + */ + createBlobSnapshot(container: string, blob: string, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + createBlobSnapshot(container: string, blob: string, callback: ErrorOrResult): void; + + /** + * Starts to copy a blob or an Azure Storage file to a destination blob. + * + * For an asynchronous copy(by default), this operation returns a object including a copy ID which + * you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis. + * The source blob for an asynchronous copy operation may be a block blob, an append blob, + * a page blob or an Azure Storage file. + * + * Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob for more details. 
+ * + * @this {BlobService} + * @param {string} sourceUri The source blob URI. + * @param {string} targetContainer The target container name. + * @param {string} targetBlob The target blob name. + * @param {Object} [options] The request options. + * @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. + * @param {boolean} [options.isIncrementalCopy] If it's incremental copy or not. Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob + * @param {string} [options.snapshotId] The source blob snapshot identifier. + * @param {Object} [options.metadata] The target blob metadata key/value pairs. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {string} [options.sourceLeaseId] The source blob lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {AccessConditions} [options.sourceAccessConditions] The source access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+     * @param {bool}              [options.useNagleAlgorithm]                 Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+     *                                                                        The default value is false.
+     * @param {errorOrResult}  callback                                       `error` will contain information
+     *                                                                        if an error occurs; otherwise `result` will contain
+     *                                                                        the blob information.
+     *                                                                        `response` will contain information related to this operation.
+     */
+    startCopyBlob(sourceUri: string, targetContainer: string, targetBlob: string, options: BlobService.CopyBlobRequestOptions, callback: ErrorOrResult): void;
+    startCopyBlob(sourceUri: string, targetContainer: string, targetBlob: string, callback: ErrorOrResult): void;
+
+    /**
+     * Abort a blob copy operation.
+     *
+     * @this {BlobService}
+     * @param {string}            container                                   The destination container name.
+     * @param {string}            blob                                        The destination blob name.
+     * @param {string}            copyId                                      The copy operation identifier.
+     * @param {Object}            [options]                                   The request options.
+     * @param {string}            [options.leaseId]                           The target blob lease identifier.
+     * @param {LocationMode}      [options.locationMode]                      Specifies the location mode used to decide which location the request should be sent to.
+     *                                                                        Please see StorageUtilities.LocationMode for the possible values.
+     * @param {int}               [options.timeoutIntervalInMs]               The server timeout interval, in milliseconds, to use for the request.
+     * @param {int}               [options.maximumExecutionTimeInMs]          The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+     *                                                                        The maximum execution time interval begins at the time that the client begins building the request. The maximum
+     *                                                                        execution time is checked intermittently while performing requests, and before executing retries.
+     * @param {string}            [options.clientRequestId]                   A string that represents the client request ID with a 1KB character limit.
+     * @param {bool}              [options.useNagleAlgorithm]                 Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+     *                                                                        The default value is false.
+     * @param {ErrorOrResponse}  callback                                     `error` will contain information.
+     *                                                                        `response` will contain information related to this operation.
+     */
+    abortCopyBlob(container: string, blob: string, copyId: string, options: BlobService.BlobRequestOptions, callback: ErrorOrResponse): void;
+    abortCopyBlob(container: string, blob: string, copyId: string, callback: ErrorOrResponse): void;
+
+    /**
+     * Retrieves a shared access signature token.
+     *
+     * @this {BlobService}
+     * @param {string}                   container                                     The container name.
+     * @param {string}                   [blob]                                        The blob name.
+     * @param {Object}                   sharedAccessPolicy                            The shared access policy.
+     * @param {string}                   [sharedAccessPolicy.Id]                       The signed identifier.
+     * @param {Object}                   [sharedAccessPolicy.AccessPolicy.Permissions] The permission type.
+     * @param {date|string}              [sharedAccessPolicy.AccessPolicy.Start]       The time at which the Shared Access Signature becomes valid (The UTC value will be used).
+     * @param {date|string}              sharedAccessPolicy.AccessPolicy.Expiry        The time at which the Shared Access Signature becomes expired (The UTC value will be used).
+     * @param {Object}                   [headers]                                     The optional header values to set for a blob returned with this SAS.
+     * @param {string}                   [headers.cacheControl]                        The optional value of the Cache-Control response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentType]                         The optional value of the Content-Type response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentEncoding]                     The optional value of the Content-Encoding response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentLanguage]                     The optional value of the Content-Language response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentDisposition]                  The optional value of the Content-Disposition response header to be returned when this SAS is used.
+     * @return {string}                                                               The shared access signature. Note this does not contain the leading "?".
+     */
+    generateSharedAccessSignature(container: string, blob: string, sharedAccessPolicy: common.SharedAccessPolicy, headers?: common.ContentSettingsHeaders): string;
+
+    /**
+     * Retrieves a shared access signature token.
+     *
+     * @this {BlobService}
+     * @param {string}                   container                                     The container name.
+     * @param {string}                   [blob]                                        The blob name.
+     * @param {Object}                   sharedAccessPolicy                            The shared access policy.
+     * @param {string}                   [sharedAccessPolicy.Id]                       The signed identifier.
+     * @param {Object}                   [sharedAccessPolicy.AccessPolicy.Permissions] The permission type.
+     * @param {date|string}              [sharedAccessPolicy.AccessPolicy.Start]       The time at which the Shared Access Signature becomes valid (The UTC value will be used).
+     * @param {date|string}              sharedAccessPolicy.AccessPolicy.Expiry        The time at which the Shared Access Signature becomes expired (The UTC value will be used).
+     * @param {string}                   [sasVersion]                                  An optional string indicating the desired SAS version to use. Value must be 2012-02-12 or later.
+     * @param {Object}                   [headers]                                     The optional header values to set for a blob returned with this SAS.
+     * @param {string}                   [headers.cacheControl]                        The optional value of the Cache-Control response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentType]                         The optional value of the Content-Type response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentEncoding]                     The optional value of the Content-Encoding response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentLanguage]                     The optional value of the Content-Language response header to be returned when this SAS is used.
+     * @param {string}                   [headers.contentDisposition]                  The optional value of the Content-Disposition response header to be returned when this SAS is used.
+     * @return {string}                                                               The shared access signature query string. Note this string does not contain the leading "?".
+     */
+    generateSharedAccessSignatureWithVersion(container: string, blob: string, sharedAccessPolicy: common.SharedAccessPolicy, sasVersion: string, headers?: common.ContentSettingsHeaders): string;
+
+    /**
+     * Retrieves a blob or container URL.
+     *
+     * @param {string}                   container                The container name.
+     * @param {string}                   [blob]                   The blob name.
+     * @param {string}                   [sasToken]               The Shared Access Signature token.
+     * @param {boolean}                  [primary]                A boolean representing whether to use the primary or the secondary endpoint.
+     * @param {string}                   [snapshotId]             The snapshot identifier.
+     * @return {string}                                           The formatted URL string.
+     * @example
+     * var azure = require('azure-storage');
+     * var blobService = azure.createBlobService();
+     * //create a SAS that expires in an hour
+     * var sasToken = blobService.generateSharedAccessSignature(containerName, blobName, { AccessPolicy: { Expiry: azure.date.minutesFromNow(60) } });
+     * var sasUrl = blobService.getUrl(containerName, blobName, sasToken, true);
+     */
+    getUrl(container: string, blob?: string, sasToken?: string, primary?: boolean, snapshotId?: string): string;
+
+    createPageBlob(container: string, blob: string, length: number, callback: ErrorOrResponse): void;
+
+    createPageBlob(container: string, blob: string, length: number, options: BlobService.CreatePageBlobOptions, callback: ErrorOrResponse): void;
+
+    /**
+     * Uploads a page blob from file.
+     *
+     * @this {BlobService}
+     * @param {string}             container                                   The container name.
+     * @param {string}             blob                                        The blob name.
+     * @param {string}             localFileName                               The local path to the file to be uploaded.
+     * @param {Object}             [options]                                   The request options.
+     * @param {SpeedSummary}       [options.speedSummary]                      The upload tracker objects.
+ * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createPageBlobFromLocalFile(container: string, blob: string, localFileName: string, options: BlobService.CreatePageBlobOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createPageBlobFromLocalFile(container: string, blob: string, localFileName: string, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Uploads a page blob from an HTML file. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * (Only available in the JavaScript Client Library for Browsers) + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} browserFile The File object to be uploaded created by HTML File API. + * @param {Object} [options] The request options. + * @param {SpeedSummary} [options.speedSummary] The upload tracker objects. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {string} [options.leaseId] The lease identifier. 
+ * @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+     *                                                                             The maximum execution time interval begins at the time that the client begins building the request. The maximum
+     *                                                                             execution time is checked intermittently while performing requests, and before executing retries.
+     * @param {string}             [options.clientRequestId]                       A string that represents the client request ID with a 1KB character limit.
+     * @param {bool}               [options.useNagleAlgorithm]                     Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+     *                                                                             The default value is false.
+     * @param {errorOrResult}  callback                                            `error` will contain information
+     *                                                                             if an error occurs; otherwise `[result]{@link BlobResult}` will contain
+     *                                                                             the blob information.
+     *                                                                             `response` will contain information related to this operation.
+     * @return {SpeedSummary}
+     */
+    createPageBlobFromBrowserFile(container: string, blob: string, browserFile: Object, options: BlobService.CreatePageBlobOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+    createPageBlobFromBrowserFile(container: string, blob: string, browserFile: Object, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+
+    /**
+     * Uploads a page blob from a stream.
+     *
+     * @this {BlobService}
+     * @param {string}             container                                   The container name.
+     * @param {string}             blob                                        The blob name.
+     * @param {Stream}             stream                                      Stream to the data to store.
+     * @param {int}                streamLength                                The length of the stream to upload.
+     * @param {Object}             [options]                                   The request options.
+     * @param {SpeedSummary}       [options.speedSummary]                      The download tracker objects.
+     * @param {int}                [options.parallelOperationThreadCount]      The number of parallel operations that may be performed when uploading.
+     * @param {string}             [options.leaseId]                           The lease identifier.
+     * @param {string}             [options.transactionalContentMD5]           An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport.
+     * @param {Object}             [options.metadata]                          The metadata key/value pairs.
+ * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. 
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createPageBlobFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, options: BlobService.CreatePageBlobOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createPageBlobFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Provides a stream to write to a page blob. Assumes that the blob exists. + * If it does not, please create the blob using createPageBlob before calling this method or use createWriteStreamNewPageBlob. + * Please note the `Stream` returned by this API should be used with piping. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs and true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. 
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `[result]{@link BlobResult}` will contain + * the blob information. + * `response` will contain information related to this operation. 
+ * @return {Writable} A Node.js Writable stream. + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * blobService.createPageBlob(containerName, blobName, 1024, function (err) { + * // Pipe file to a blob + * var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToExistingPageBlob(containerName, blobName)); + * }); + */ + createWriteStreamToExistingPageBlob(container: string, blob: string, options: BlobService.CreatePageBlobOptions, callback: ErrorOrResult): stream.Writable; + createWriteStreamToExistingPageBlob(container: string, blob: string, callback: ErrorOrResult): stream.Writable; + + /** + * Provides a stream to write to a page blob. Creates the blob before writing data. + * Please note the `Stream` returned by this API should be used with piping. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} length The blob length. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs and true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. 
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `[result]{@link BlobResult}` will contain + * the blob information. + * `response` will contain information related to this operation. 
+ * @return {Writable} A Node.js Writable stream.
+ * @example
+ * var azure = require('azure-storage');
+ * var blobService = azure.createBlobService();
+ * // Pipe file to a new page blob; this call creates the blob itself, so no separate createPageBlob is needed
+ * var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToNewPageBlob(containerName, blobName, 1024));
+ */
+ createWriteStreamToNewPageBlob(container: string, blob: string, length: number, options: BlobService.CreatePageBlobOptions, callback: ErrorOrResult): stream.Writable;
+ createWriteStreamToNewPageBlob(container: string, blob: string, length: number, callback: ErrorOrResult): stream.Writable;
+
+ /**
+ * Updates a page blob from a stream.
+ *
+ * @this {BlobService}
+ * @param {string}             container                                 The container name.
+ * @param {string}             blob                                      The blob name.
+ * @param {Readable}           readStream                                The Node.js Readable stream.
+ * @param {int}                rangeStart                                The range start.
+ * @param {int}                rangeEnd                                  The range end.
+ * @param {Object}             [options]                                 The request options.
+ * @param {string}             [options.leaseId]                         The target blob lease identifier.
+ * @param {bool}               [options.useTransactionalMD5]             Calculate and send/validate content MD5 for transactions.
+ * @param {string}             [options.transactionalContentMD5]         An optional hash value used to ensure transactional integrity for the page.
+ * @param {AccessConditions}   [options.accessConditions]                The access conditions.
+ * @param {LocationMode}       [options.locationMode]                    Specifies the location mode used to decide which location the request should be sent to.
+ *                                                                      Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int}                [options.timeoutIntervalInMs]             The server timeout interval, in milliseconds, to use for the request.
+ * @param {int}                [options.maximumExecutionTimeInMs]        The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the blob information. + * `response` will contain information related to this operation. + */ + createPagesFromStream(container: string, blob: string, readStream: stream.Readable, rangeStart: number, rangeEnd: number, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + createPagesFromStream(container: string, blob: string, readStream: stream.Readable, rangeStart: number, rangeEnd: number, callback: ErrorOrResult): void; + + /** + * Lists page ranges. Lists all of the page ranges by default, or only the page ranges over a specific range of bytes if rangeStart and rangeEnd are specified. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {int} [options.rangeStart] The range start. + * @param {int} [options.rangeEnd] The range end. + * @param {string} [options.snapshotId] The snapshot identifier. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the page range information. + * `response` will contain information related to this operation. + */ + listPageRanges(container: string, blob: string, options: BlobService.GetBlobRequestOptions, callback: ErrorOrResult): void; + listPageRanges(container: string, blob: string, callback: ErrorOrResult): void; + + getPageRangesDiff(container: string, blob: string, previousSnapshotTime: string, options: BlobService.GetBlobRequestOptions, callback: ErrorOrResult): void; + getPageRangesDiff(container: string, blob: string, previousSnapshotTime: string, callback: ErrorOrResult): void; + + /** + * Clears a range of pages. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {int} rangeStart The range start. + * @param {int} rangeEnd The range end. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + clearPageRange(container: string, blob: string, rangeStart: number, rangeEnd: number, options: BlobService.BlobRequestOptions, callback: ErrorOrResponse): void; + clearPageRange(container: string, blob: string, rangeStart: number, rangeEnd: number, callback: ErrorOrResponse): void; + + /** + * Resizes a page blob. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {String} size The size of the page blob, in bytes. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The blob lease identifier. + * @param {AccessConditions} [options.accessConditions] The access conditions. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + */ + resizePageBlob(container: string, blob: string, size: number, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + resizePageBlob(container: string, blob: string, size: number, callback: ErrorOrResult): void; + + /** + * Sets the page blob's sequence number. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {SequenceNumberAction} sequenceNumberAction A value indicating the operation to perform on the sequence number. + * The allowed values are defined in azure.BlobUtilities.SequenceNumberAction. + * @param {string} sequenceNumber The sequence number. The value of the sequence number must be between 0 and 2^63 - 1. 
+ * Set this parameter to null if this operation is an increment action. + * @param {Object} [options] The request options. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + */ + setPageBlobSequenceNumber(container: string, blob: string, sequenceNumberAction: string, sequenceNumber: number, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + setPageBlobSequenceNumber(container: string, blob: string, sequenceNumberAction: string, sequenceNumber: number, callback: ErrorOrResult): void; + + /** + * Creates a new block blob or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. 
+ * Partial updates are not supported with Put Blob; The content of the existing blob is overwritten with the content of the new blob. + * To perform a partial update of the content of a block blob, use the Put Block List operation. + * Calling Put Blob to create a page blob only initializes the blob. To add content to a page blob, call the Put Page operation. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} localFileName The local path to the file to be uploaded. + * @param {Object} [options] The request options. + * @param {int} [options.blockSize] The size of each block. Maximum is 100MB. + * @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. 
+ * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `[result]{@link BlobResult}` will contain + * the blob information. + * `response` will contain information related to this operation. + * @return {SpeedSummary} + */ + createBlockBlobFromLocalFile(container: string, blob: string, localFileName: string, options: BlobService.CreateBlockBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createBlockBlobFromLocalFile(container: string, blob: string, localFileName: string, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Creates a new block blob. If the blob already exists on the service, it will be overwritten. 
+ * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * (Only available in the JavaScript Client Library for Browsers) + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} browserFile The File object to be uploaded created by HTML File API. + * @param {Object} [options] The request options. + * @param {int} [options.blockSize] The size of each block. Maximum is 100MB. + * @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+ * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `[result]{@link BlobResult}` will contain + * the blob information. + * `response` will contain information related to this operation. + * @return {SpeedSummary} + */ + createBlockBlobFromBrowserFile(container: string, blob: string, browserFile: Object, options: BlobService.CreateBlockBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createBlockBlobFromBrowserFile(container: string, blob: string, browserFile: Object, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Uploads a block blob from a stream. + * + * @this {BlobService} + * @param {string} container The container name. 
+ * @param {string}             blob                                      The blob name.
+ * @param {Stream}             stream                                    Stream to the data to store.
+ * @param {int}                streamLength                              The length of the stream to upload.
+ * @param {errorOrResult}      callback                                  The callback function.
+ * @return {SpeedSummary}
+ */
+ createBlockBlobFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+
+ /**
+ * Uploads a block blob from a stream.
+ *
+ * @this {BlobService}
+ * @param {string}             container                                 The container name.
+ * @param {string}             blob                                      The blob name.
+ * @param {Stream}             stream                                    Stream to the data to store.
+ * @param {int}                streamLength                              The length of the stream to upload.
+ * @param {Object}             [options]                                 The request options.
+ * @param {SpeedSummary}       [options.speedSummary]                    The download tracker objects.
+ * @param {int}                [options.blockSize]                       The size of each block. Maximum is 100MB.
+ * @param {string}             [options.blockIdPrefix]                   The prefix to be used to generate the block id.
+ * @param {string}             [options.leaseId]                         The lease identifier.
+ * @param {string}             [options.transactionalContentMD5]         The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport.
+ * @param {Object}             [options.metadata]                        The metadata key/value pairs.
+ * @param {int}                [options.parallelOperationThreadCount]    The number of parallel operations that may be performed when uploading.
+ * @param {bool}               [options.storeBlobContentMD5]             Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
+ * @param {bool}               [options.useTransactionalMD5]             Calculate and send/validate content MD5 for transactions.
+ * @param {string}             [options.contentSettings.contentType]     The MIME content type of the blob. The default type is application/octet-stream.
+ * @param {string}             [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
+ * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createBlockBlobFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, options: BlobService.CreateBlockBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Uploads a block blob from a text string. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. 
+ * @param {string|object} text The blob text, as a string or in a Buffer. + * @param {Object} [options] The request options. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + */ + createBlockBlobFromText(container: string, blob: string, text: string | Buffer, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): void; + createBlockBlobFromText(container: string, blob: string, text: string | Buffer, callback: ErrorOrResult): void; + + /** + * Provides a stream to write to a block blob. + * Please note the `Stream` returned by this API should be used with piping. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {int} [options.blockSize] The size of each block. Maximum is 100MB. + * @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. + * @param {string} [options.leaseId] The lease identifier. + * @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. 
+ * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs and true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + * @return {Writable} A Node.js Writable stream. + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToBlockBlob(containerName, blobName, { blockIdPrefix: 'block' })); + */ + createWriteStreamToBlockBlob(container: string, blob: string, options: BlobService.CreateBlockBlobRequestOptions, callback?: ErrorOrResult): stream.Writable; + createWriteStreamToBlockBlob(container: string, blob: string, callback?: ErrorOrResult): stream.Writable; + + /** + * Creates a new block to be committed as part of a blob. + * + * @this {BlobService} + * @param {string} blockId The block identifier. + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Readable} readStream The Node.js Readable stream. + * @param {int} streamLength The stream length. + * @param {Object} [options] The request options. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + createBlockFromStream(blockId: string, container: string, blob: string, readStream: stream.Readable, streamLength: number, options: BlobService.BlobRequestOptions, callback: ErrorOrResponse): void; + createBlockFromStream(blockId: string, container: string, blob: string, readStream: stream.Readable, streamLength: number, callback: ErrorOrResponse): void; + + /** + * Creates a new block to be committed as part of a blob. + * + * @this {BlobService} + * @param {string} blockId The block identifier. + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string|buffer} content The block content. + * @param {Object} [options] The request options. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.leaseId] The target blob lease identifier. 
+ * @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + createBlockFromText(blockId: string, container: string, blob: string, content: string | Buffer, options: BlobService.BlobRequestOptions, callback: ErrorOrResponse): void; + createBlockFromText(blockId: string, container: string, blob: string, content: string | Buffer, callback: ErrorOrResponse): void; + + /** + * Creates a new block to be committed as part of a blob from an URL of an Azure blob or file. + * + * @this {BlobService} + * @param {string} blockId The block identifier. + * @param {string} container The container name. + * @param {string} blob The blob name. 
+ * @param {string} sourceURL The URL of the source data. + * It can point to any Azure Blob or File, that is either public or has a shared access signature attached. + * @param {int} sourceRangeStart The start of the range of bytes(inclusive) that has to be taken from the copy source. + * @param {int} sourceRangeEnd The end of the range of bytes(inclusive) that has to be taken from the copy source. + * @param {object} [options] The request options. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + createBlockFromURL(blockId: string, container: string, blob: string, sourceURL: string, sourceRangeStart: number, sourceRangeEnd: number, options: BlobService.CreateBlockRequestOptions, callback: ErrorOrResponse): void; + createBlockFromURL(blockId: string, container: string, blob: string, sourceURL: string, sourceRangeStart: number, sourceRangeEnd: number, callback: ErrorOrResponse): void; + + /** + * Writes a blob by specifying the list of block IDs that make up the blob. + * In order to be written as part of a blob, a block must have been successfully written to the server in a prior + * createBlock operation. + * Note: If no valid list is specified in the blockList parameter, blob would be updated with empty content, + * i.e. existing blocks in the blob will be removed, this behavior is kept for backward compatibility consideration. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} blockList The wrapper for block ID list contains block IDs that make up the blob. + * Three kinds of list are provided, please choose one to use according to requirement. + * For more background knowledge, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list + * @param {string[]} [blockList.LatestBlocks] The list contains block IDs that make up the blob sequentially. + * All the block IDs in this list will be specified within Latest element. + * Choose this list to contain block IDs indicates that the Blob service should first search + * the uncommitted block list, and then the committed block list for the named block. + * @param {string[]} [blockList.CommittedBlocks] The list contains block IDs that make up the blob sequentially. 
+ * All the block IDs in this list will be specified within Committed element. + * Choose this list to contain block IDs indicates that the Blob service should only search + * the committed block list for the named block. + * @param {string[]} [blockList.UncommittedBlocks] The list contains block IDs that make up the blob sequentially. + * All the block IDs in this list will be specified within Uncommitted element. + * Choose this list to contain block IDs indicates that the Blob service should only search + * the uncommitted block list for the named block. + * @param {Object} [options] The request options. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the blocklist information. + * `response` will contain information related to this operation. + * @example + * var azure = require('azure-storage'); + * var blobService = azure.createBlobService(); + * blobService.createBlockFromText("sampleBlockName", containerName, blobName, "sampleBlockContent", function(error) { + * assert.equal(error, null); + * // In this example, LatestBlocks is used, we hope the Blob service first search + * // the uncommitted block list, and then the committed block list for the named block "sampleBlockName", + * // and thus make sure the block is with latest content. + * blobService.commitBlocks(containerName, blobName, { LatestBlocks: ["sampleBlockName"] }, function(error) { + * assert.equal(error, null); + * }); + * }); + */ + commitBlocks(container: string, blob: string, blockList: BlobService.PutBlockListRequest, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): void; + commitBlocks(container: string, blob: string, blockList: BlobService.PutBlockListRequest, callback: ErrorOrResult): void; + + /** + * Retrieves the list of blocks that have been uploaded as part of a block blob. 
+ * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {BlockListFilter} blocklisttype The type of block list to retrieve. + * @param {Object} [options] The request options. + * @param {string} [options.snapshotId] The source blob snapshot identifier. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the blocklist information. + * `response` will contain information related to this operation. 
+ */ + listBlocks(container: string, blob: string, blocklisttype: string, options: BlobService.BlobRequestOptions, callback: ErrorOrResult): void; + listBlocks(container: string, blob: string, blocklisttype: string, callback: ErrorOrResult): void; + + /** + * Generate a random block id prefix + */ + generateBlockIdPrefix(): string; + + /** + * Get a block id according to prefix and block number + */ + getBlockId(prefix: string, number: number | string): string; + + /** + * Creates an empty append blob. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + createOrReplaceAppendBlob(container: string, blob: string, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResponse): void; + createOrReplaceAppendBlob(container: string, blob: string, callback: ErrorOrResponse): void; + + /** + * Creates a new append blob from a local file. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * If you want to append data to an already existing blob, please look at appendFromLocalFile. 
+ * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string} localFileName The local path to the file to be uploaded. + * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {string} [options.leaseId] The lease identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createAppendBlobFromLocalFile(container: string, blob: string, localFileName: string, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createAppendBlobFromLocalFile(container: string, blob: string, localFileName: string, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Creates a new append blob from an HTML File object. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * If you want to append data to an already existing blob, please look at appendFromBrowserFile. + * (Only available in the JavaScript Client Library for Browsers) + * + * @this {BlobService} + * @param {string} container The container name. 
+ * @param {string} blob The blob name.
+ * @param {Object} browserFile The File object to be uploaded created by HTML File API.
+ * @param {Object} [options] The request options.
+ * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
+ * @param {string} [options.leaseId] The lease identifier.
+ * @param {Object} [options.metadata] The metadata key/value pairs.
+ * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
+ * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+ * @param {Object} [options.contentSettings] The content settings of the blob.
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
+ * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
+ * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+ * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
+ * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
+ * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
+ * @param {AccessConditions} [options.accessConditions] The access conditions.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `[result]{@link BlobResult}` will contain + * the blob information. + * `response` will contain information related to this operation. + * @return {SpeedSummary} + */ + createAppendBlobFromBrowserFile(container: string, blob: string, browserFile: Object, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createAppendBlobFromBrowserFile(container: string, blob: string, browserFile: Object, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Uploads an append blob from a stream. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * If you want to append data to an already existing blob, please look at appendFromStream. 
+ * 
+ * @this {BlobService}
+ * @param {string} container The container name.
+ * @param {string} blob The blob name.
+ * @param {Stream} stream Stream to the data to store.
+ * @param {int} streamLength The length of the stream to upload.
+ * @param {Object} [options] The request options.
+ * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
+ * @param {SpeedSummary} [options.speedSummary] The download tracker objects.
+ * @param {string} [options.leaseId] The lease identifier.
+ * @param {Object} [options.metadata] The metadata key/value pairs.
+ * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
+ * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+ * @param {string} [options.leaseId] The target blob lease identifier.
+ * @param {Object} [options.contentSettings] The content settings of the blob.
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
+ * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
+ * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+ * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
+ * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
+ * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
+ * @param {AccessConditions} [options.accessConditions] The access conditions.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createAppendBlobFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createAppendBlobFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Uploads an append blob from a text string. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * If you want to append data to an already existing blob, please look at appendFromText. 
+ * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string|object} text The blob text, as a string or in a Buffer. + * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {string} [options.leaseId] The lease identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + */ + createAppendBlobFromText(container: string, blob: string, text: string | Buffer, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): void; + createAppendBlobFromText(container: string, blob: string, text: string | Buffer, callback: ErrorOrResult): void; + + /** + * Provides a stream to write to a new append blob. If the blob already exists on the service, it will be overwritten. + * To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * Please note the `Stream` returned by this API should be used with piping. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. 
+ * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {string} [options.leaseId] The lease identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs and true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * information about the blob.
+ * `response` will contain information related to this operation.
+ * @return {Writable} A Node.js Writable stream.
+ * @example
+ * var azure = require('azure-storage');
+ * var blobService = azure.createBlobService();
+ * var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToNewAppendBlob(containerName, blobName));
+ */
+ createWriteStreamToNewAppendBlob(container: string, blob: string, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): stream.Writable;
+ createWriteStreamToNewAppendBlob(container: string, blob: string, callback: ErrorOrResult): stream.Writable;
+
+ /**
+ * Provides a stream to write to an existing append blob. Assumes that the blob exists.
+ * If it does not, please create the blob using createAppendBlob before calling this method or use createWriteStreamToNewAppendBlob.
+ * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
+ * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
+ * Please note the `Stream` returned by this API should be used with piping. 
+ * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {string} [options.leaseId] The lease identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. + * The default value is false for page blobs and true for block blobs. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * information about the blob.
+ * `response` will contain information related to this operation.
+ * @return {Writable} A Node.js Writable stream.
+ * @example
+ * var azure = require('azure-storage');
+ * var blobService = azure.createBlobService();
+ * var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToExistingAppendBlob(containerName, blobName));
+ */
+ createWriteStreamToExistingAppendBlob(container: string, blob: string, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): stream.Writable;
+ createWriteStreamToExistingAppendBlob(container: string, blob: string, callback: ErrorOrResult): stream.Writable;
+
+ /**
+ * Appends to an append blob from a local file. Assumes the blob already exists on the service.
+ * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
+ * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
+ *
+ * @this {BlobService}
+ * @param {string} container The container name. 
+ * @param {string} blob The blob name. + * @param {string} localFileName The local path to the file to be uploaded. + * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {string} [options.leaseId] The lease identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. + * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. 
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + appendFromLocalFile(container: string, blob: string, localFileName: string, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + appendFromLocalFile(container: string, blob: string, localFileName: string, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Appends to an append blob from an HTML File object. Assumes the blob already exists on the service. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * (Only available in the JavaScript Client Library for Browsers) + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Object} browserFile The File object to be uploaded created by HTML File API. + * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {string} [options.leaseId] The lease identifier. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {Object} [options.contentSettings] The content settings of the blob. 
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `[result]{@link BlobResult}` will contain
+ * the blob information.
+ * `response` will contain information related to this operation.
+ * @return {SpeedSummary}
+ */
+ appendFromBrowserFile(container: string, blob: string, browserFile: Object, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+ appendFromBrowserFile(container: string, blob: string, browserFile: Object, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+
+ /**
+ * Appends to an append blob from a stream. Assumes the blob already exists on the service.
+ * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
+ * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
+ *
+ * @this {BlobService}
+ * @param {string} container The container name.
+ * @param {string} blob The blob name.
+ * @param {Stream} stream Stream to the data to store.
+ * @param {int} streamLength The length of the stream to upload.
+ * @param {Object} [options] The request options.
+ * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
+ * @param {SpeedSummary} [options.speedSummary] The download tracker objects.
+ * @param {string} [options.leaseId] The lease identifier.
+ * @param {Object} [options.metadata] The metadata key/value pairs.
+ * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
+ * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. 
+ * @param {Object} [options.contentSettings] The content settings of the blob. + * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. 
+ * @return {SpeedSummary}
+ */
+ appendFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+ appendFromStream(container: string, blob: string, stream: stream.Readable, streamLength: number, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+
+ /**
+ * Appends to an append blob from a text string. Assumes the blob already exists on the service.
+ * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks.
+ * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you.
+ *
+ * @this {BlobService}
+ * @param {string} container The container name.
+ * @param {string} blob The blob name.
+ * @param {string|object} text The blob text, as a string or in a Buffer.
+ * @param {Object} [options] The request options.
+ * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry.
+ * @param {string} [options.leaseId] The lease identifier.
+ * @param {Object} [options.metadata] The metadata key/value pairs.
+ * @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs.
+ * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+ * @param {Object} [options.contentSettings] The content settings of the blob.
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
+ * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. 
+ * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. + * @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. 
+ */ + appendFromText(container: string, blob: string, text: string, options: BlobService.CreateBlobRequestOptions, callback: ErrorOrResult): void; + appendFromText(container: string, blob: string, text: string, callback: ErrorOrResult): void; + + /** + * Creates a new block from a read stream to be appended to an append blob. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {Readable} readStream The Node.js Readable stream. + * @param {int} streamLength The stream length. + * @param {Object} [options] The request options. + * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. + * @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. + */ + appendBlockFromStream(container: string, blob: string, readStream: stream.Readable, streamLength: number, options: BlobService.AppendBlobRequestOptions, callback: ErrorOrResult): void; + appendBlockFromStream(container: string, blob: string, readStream: stream.Readable, streamLength: number, callback: ErrorOrResult): void; + + /** + * Creates a new block from a text to be appended to an append blob. + * This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. + * If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. + * + * @this {BlobService} + * @param {string} container The container name. + * @param {string} blob The blob name. + * @param {string|object} content The block text, as a string or in a Buffer. + * @param {Object} [options] The request options. 
+ * @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. + * @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. + * @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. + * @param {string} [options.leaseId] The target blob lease identifier. + * @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the blob. + * `response` will contain information related to this operation. 
+ */ + appendBlockFromText(container: string, blob: string, content: string | Buffer, options: BlobService.AppendBlobRequestOptions, callback: ErrorOrResult): void; + appendBlockFromText(container: string, blob: string, content: string | Buffer, callback: ErrorOrResult): void; + + /** + * The callback for {BlobService~getBlobToText}. + * @typedef {function} BlobService~blobToText + * @param {Object} error If an error occurs, the error information. + * @param {string} text The text returned from the blob. + * @param {Object} blockBlob Information about the blob. + * @param {Object} response Information related to this operation. + */ + static SpeedSummary: common.streams.speedsummary.SpeedSummary; + } + export module BlobService { + export interface CreateContainerOptions extends common.RequestOptions { + metadata?: Map; + publicAccessLevel?: string; + } + + export interface ListContainerOptions extends common.RequestOptions { + maxResults?: number; + include?: string; + } + + export interface ConditionalRequestOption extends common.RequestOptions { + accessConditions?: AccessConditions; + } + + export interface ContainerOptions extends ConditionalRequestOption { + leaseId?: string; + } + + export interface ContainerAclOptions extends ContainerOptions { + publicAccessLevel?: string; + } + + export interface LeaseRequestOptions extends ConditionalRequestOption { + } + + export interface AcquireLeaseRequestOptions extends LeaseRequestOptions { + leaseDuration?: number; + proposedLeaseId?: string; + } + + export interface BreakLeaseRequestOptions extends LeaseRequestOptions { + leaseBreakPeriod?: number; + } + + export interface ListBlobsSegmentedRequestOptions extends common.RequestOptions { + /** + * {string} Delimiter, i.e. '/', for specifying folder hierarchy. + */ + delimiter?: string; + /** + * {int} Specifies the maximum number of blobs to return per call to Azure ServiceClient. 
(maximum: 5000) + */ + maxResults?: number; + + /** + * {string} Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs'). Multiple values can be added separated with a comma (,) + */ + include?: string; + } + + export interface ListBlobPrefixesSegmentedRequestOptions extends ListBlobsSegmentedRequestOptions {} + + export interface LeaseResult { + container: string; + blob: string; + id: string; + time: number; + etag: string; + lastModified: string; + } + + export interface ListBlobsResult { + entries: BlobResult[]; + continuationToken?: common.ContinuationToken; + } + + export interface ListBlobDirectoriesResult { + entries: BlobDirectoryResult[]; + continuationToken?: common.ContinuationToken; + } + + export interface ContainerAclResult extends ContainerResult { + signedIdentifiers: {[key:string]: common.AccessPolicy} + } + + export interface ContainerResult { + name: string; + publicAccessLevel: string; + etag: string; + lastModified: string; + hasImmutabilityPolicy: boolean; + hasLegalHold: boolean; + metadata?: { [key: string]: string; }; + requestId?: string; + lease?: { + duration?: string; + status: string; + state: string; + }; + exists?: boolean; + created?: boolean; + } + + export interface ListContainerResult { + continuationToken: common.ContinuationToken; + entries: ContainerResult[]; + } + + export interface BlobDirectoryResult { + name: string; + } + + export interface BlobResult { + name: string; + snapshot?: string; + deleted?: boolean; + container: string; + metadata?: { [key: string]: string; }; + etag: string; + lastModified: string; + creationTime: string; + contentLength: string; + blobType: string; + accessTier?: string; + accessTierChangeTime?: string; + accessTierInferred?: boolean; + archiveStatus?: string; + isIncrementalCopy?: boolean; + requestId: string; + sequenceNumber?: string; + contentRange?: string; + committedBlockCount?: string; + serverEncrypted?: 
string; + deletedTime?: string; + remainingRetentionDays?: string; + appendOffset? : string; + contentSettings?: { + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + cacheControl?: string; + contentDisposition?: string; + contentMD5?: string; + } + lease?: { + id?: string; + status?: string; + state?: string; + duration?: string; + } + copy?: { + id?: string; + status?: string; + completionTime?: string; + statusDescription?: string; + destinationSnapshot?: string; + progress?: string; + source?: string; + }, + exists?: boolean; + created?: boolean; + } + + export interface CreatePageBlobOptions { + metadata?: Object; + leaseId?: string; + transactionalContentMD5?: string; + blobTier?: string; + contentSettings?: { + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + cacheControl?: string; + contentDisposition?: string; + contentMD5?: string; + } + sequenceNumber?: string; + accessConditions?: AccessConditions; + locationMode?: StorageUtilities.LocationMode; + timeoutIntervalInMs?: number; + clientRequestTimeoutInMs?: number; + maximumExecutionTimeInMs?: number; + useNagleAlgorithm?: boolean; + } + + export interface BlobRequestOptions extends ConditionalRequestOption { + snapshotId?: string; // TODO: Not valid for most write requests... 
+ leaseId?: string; + } + + export interface CreateBlockRequestOptions extends BlobRequestOptions { + transactionalContentMD5?: string; + } + + export interface AppendBlobRequestOptions extends ConditionalRequestOption, BlobRequestOptions { + absorbConditionalErrorsOnRetry?: boolean; + maxBlobSize?: number; + appendPosition?: number; + } + + export interface SetBlobPropertiesRequestOptions extends BlobRequestOptions { + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + contentMD5?: string; + cacheControl?: string; + contentDisposition?: string; + } + + export interface GetBlobRequestOptions extends BlobRequestOptions { + speedSummary?: common.streams.speedsummary.SpeedSummary; + parallelOperationThreadCount?: number; + rangeStart?: number; + rangeEnd?: number; + useTransactionalMD5?: boolean; + disableContentMD5Validation?: boolean; + } + + export interface CopyBlobRequestOptions extends BlobRequestOptions { + metadata?: { [k: string]: string; }; + sourceLeaseId?: string; + accessConditions?: AccessConditions; + sourceAccessConditions?: AccessConditions; + isIncrementalCopy?: boolean; + } + + export interface DeleteBlobRequestOptions extends BlobRequestOptions { + deleteSnapshots?: string; + } + + export interface CreateBlobRequestOptions extends BlobRequestOptions { + speedSummary?: common.streams.speedsummary.SpeedSummary; + parallelOperationThreadCount?: number; + useTransactionalMD5?: boolean; + blockIdPrefix?: string; + metadata?: {[k: string]: string}; + storeBlobContentMD5?: boolean; + transactionalContentMD5?: string; + contentSettings?: { + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + cacheControl?: string; + contentDisposition?: string; + contentMD5?: string; + } + } + + export interface CreateBlockBlobRequestOptions extends CreateBlobRequestOptions { + blockSize?: number; + } + + export interface BlobToText { + (error: Error, text: string, result: BlobResult, response: ServiceResponse): 
void + } + + export interface ListPageRangesRequestOptions extends common.RequestOptions { + rangeStart?: number; + rangeEnd?: number; + } + + export interface BlockListResult { + CommittedBlocks?: Block[]; + UncommittedBlocks?: Block[]; + } + + export interface PutBlockListRequest { + LatestBlocks?: string[]; + CommittedBlocks?: string[]; + UncommittedBlocks?: string[]; + } + + export interface Block { + Name?: string; + Size?: string; + } + } + } + + // ########################### + // ./services/blob/blobutilities + // ########################### + module blobutilities { + export var BlobUtilities: { + SharedAccessPermissions: { + READ: string; + WRITE: string; + DELETE: string; + LIST: string; + }; + BlobListingDetails: { + SNAPSHOTS: string; + METADATA: string; + UNCOMMITTED_BLOBS: string; + COPY: string; + DELETED: string; + }; + SnapshotDeleteOptions: { + SNAPSHOTS_ONLY: string; + BLOB_AND_SNAPSHOTS: string; + }; + BlockListFilter: { + ALL: string; + COMMITTED: string; + UNCOMMITTED: string; + }; + BlobContainerPublicAccessType: { + OFF: string; + CONTAINER: string; + BLOB: string; + }; + SequenceNumberAction: { + MAX: string; + UPDATE: string; + INCREMENT: string; + }; + BlobTier: { + PremiumPageBlobTier: { + P4: string; + P6: string; + P10: string; + P20: string; + P30: string; + P40: string; + P50: string; + P60: string; + }; + StandardBlobTier: { + HOT: string; + COOL: string; + ARCHIVE: string; + }; + } + }; + } + } + + module queue { + export class QueueService extends StorageServiceClient { + /** + * @property {boolean} QueueService#queueMessagEncoder + * @defaultvalue {boolean} true + * A flag indicating whether the message should be base-64 encoded. Default is true. + */ + messageEncoder: QueueMessageEncoder; + + /** + * Creates a new QueueService object. + * + * The QueueService class is used to perform operations on the Microsoft Azure Queue Service. 
+ * + * For more information on using the Queue Service, as well as task focused information on using it from a Node.js application, see + * [How to Use the Queue Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-queues/). + * The following defaults can be set on the Queue service. + * messageEncoder The message encoder to specify how QueueService encodes and decodes the queue message. Default is `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. + * defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Queue service. + * defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Queue service. + * defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Queue service. + * defaultLocationMode The default location mode for requests made via the Queue service. + * useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Queue service; true to use the + * Nagle algorithm; otherwise, false. The default value is false. + * enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use + * http(s).Agent({keepAlive:true}). + * If no connection string or storageaccount and storageaccesskey are provided, + * the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. + * @augments {StorageServiceClient} + * @constructor QueueService + * @param {string} [storageAccountOrConnectionString] The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. 
+ * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} [sasToken] The Shared Access Signature token. + * @param {string} [endpointSuffix] The endpoint suffix. + */ + constructor(storageAccountOrConnectionString?: string, storageAccessKey?: string, host?: string|StorageHost, sasToken?: string, endpointSuffix?: string); + + /** + * Associate a filtering operation with this QueueService. Filtering operations + * can include logging, automatically retrying, etc. Filter operations are objects + * that implement a method with the signature: + * + * "function handle (requestOptions, next)". + * + * After doing its preprocessing on the request options, the method needs to call + * "next" passing a callback with the following signature: + * signature: + * + * "function (returnObject, finalCallback, next)" + * + * In this callback, and after processing the returnObject (the response from the + * request to the server), the callback needs to either invoke next if it exists to + * continue processing other filters or simply invoke finalCallback otherwise to end + * up the service invocation. + * + * @function QueueService#withFilter + * @param {Object} filter The new filter object. + * @return {QueueService} A new service client with the filter applied. + */ + withFilter(newFilter: common.filters.IFilter): QueueService; + + /** + * Gets the service stats for a storage account’s Queue service. + * + * @function QueueService#getServiceStats + * + * @this {QueueService} + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise, `result` + * will contain the stats and `response` + * will contain information related to this operation. + */ + getServiceStats(options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Gets the service stats for a storage account’s Queue service. + * + * @function QueueService#getServiceStats + * + * @this {QueueService} + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise, `result` + * will contain the stats and `response` + * will contain information related to this operation. + */ + getServiceStats(callback: ErrorOrResult): void; + + /** + * Gets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. + * + * @this {QueueService} + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise, `errorOrResult` + * will contain the properties and `response` + * will contain information related to this operation. + */ + getServiceProperties(options: common.RequestOptions, callback?: ErrorOrResult): void; + + /** + * Gets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. + * + * @this {QueueService} + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise, `errorOrResult` + * will contain the properties and `response` + * will contain information related to this operation. + */ + getServiceProperties(callback?: ErrorOrResult): void; + + /** + * Sets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. + * You can also use this operation to set the default request version for all incoming requests that do not have a version specified. + * + * @this {QueueService} + * @param {Object} serviceProperties The service properties. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise, `response` + * will contain information related to this operation. + */ + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.ServiceProperties, options: common.RequestOptions, callback?: ErrorOrResponse): void; + + /** + * Sets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. + * You can also use this operation to set the default request version for all incoming requests that do not have a version specified. + * + * @this {QueueService} + * @param {Object} serviceProperties The service properties. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise, `response` + * will contain information related to this operation. + */ + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.ServiceProperties, callback?: ErrorOrResponse): void; + + /** + * Lists a segment containing a collection of queue items whose names begin with the specified prefix under the given account. 
+ * + * @this {QueueService} + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {int} [options.maxResults] Specifies the maximum number of queues to return per call to Azure storage. This does NOT affect list size returned by this function. (maximum: 5000) + * @param {string} [options.include] Include this parameter to specify that the queue's metadata be returned as part of the response body. (allowed values: '', 'metadata') + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of queues and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. 
+ */ + listQueuesSegmented(currentToken: common.ContinuationToken, options: QueueService.ListQueuesRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of queue items whose names begin with the specified prefix under the given account. + * + * @function QueueService#listQueuesSegmented + * + * @this {QueueService} + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of queues and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listQueuesSegmented(currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of queue items under the given account. + * + * @function QueueService#listQueuesSegmentedWithPrefix + * + * @this {QueueService} + * @param {string} prefix The prefix of the queue name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.* + * @param {Object} [options] The request options. + * @param {string} [options.marker] String value that identifies the portion of the list to be returned with the next list operation. + * @param {int} [options.maxResults] Specifies the maximum number of queues to return per call to Azure storage. This does NOT affect list size returned by this function. (maximum: 5000) + * @param {string} [options.include] Include this parameter to specify that the queue's metadata be returned as part of the response body. 
(allowed values: '', 'metadata') + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of queues and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listQueuesSegmentedWithPrefix(prefix: string, currentToken: common.ContinuationToken, options: QueueService.ListQueuesRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of queue items under the given account. + * + * @function QueueService#listQueuesSegmentedWithPrefix + * + * @this {QueueService} + * @param {string} prefix The prefix of the queue name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. 
Please use 'null' or 'undefined' if this is the first operation.* + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of queues and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listQueuesSegmentedWithPrefix(prefix: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Checks to see if a queue exists. + * + * @function QueueService#doesQueueExist + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise, `result` + * will be true if the queue exists and false if not, + * and `response` will contain information related to this operation. 
+ */ + doesQueueExist(queue: string, options: common.RequestOptions, callback?: ErrorOrResult): void; + + /** + * Checks to see if a queue exists. + * + * @function QueueService#doesQueueExist + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise, `result` + * will be true if the queue exists and false if not, + * and `response` will contain information related to this operation. + */ + doesQueueExist(queue: string, callback?: ErrorOrResult): void; + + /** + * Creates a new queue under the given account. + * + * @function QueueService#createQueue + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the queue information. + * `response` will contain information related to this operation. + */ + createQueue(queue: string, optionsOrCallback: QueueService.CreateQueueRequestOptions, callback?: ErrorOrResult): void; + + /** + * Creates a new queue under the given account. + * + * @function QueueService#createQueue + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the queue information. + * `response` will contain information related to this operation. + */ + createQueue(queue: string, callback?: ErrorOrResult): void; + + /** + * Creates a new queue under the given account if it doesn't exist. + * + * @function QueueService#createQueueIfNotExists + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will be true if the + * queue was created by this operation and false if not, and + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var queueService = azure.createQueueService(); + * queueService.createQueueIfNotExists('taskqueue', function(error) { + * if(!error) { + * // Queue created or exists + * } + * }); + */ + createQueueIfNotExists(queue: string, optionsOrCallback: QueueService.CreateQueueRequestOptions, callback?: ErrorOrResult): void; + + /** + * Creates a new queue under the given account if it doesn't exist. + * + * @function QueueService#createQueueIfNotExists + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will be true if the + * queue was created by this operation and false if not, and + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var queueService = azure.createQueueService(); + * queueService.createQueueIfNotExists('taskqueue', function(error) { + * if(!error) { + * // Queue created or exists + * } + * }); + */ + createQueueIfNotExists(queue: string, callback?: ErrorOrResult): void; + + /** + * Permanently deletes the specified queue. + * + * @function QueueService#deleteQueue + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteQueue(queue: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + + /** + * Permanently deletes the specified queue. + * + * @function QueueService#deleteQueue + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteQueue(queue: string, callback: ErrorOrResponse): void; + + /** + * Permanently deletes the specified queue if it exists. + * + * @function QueueService#deleteQueueIfExists + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * 'true' if the queue was deleted and 'false' if the queue did not exist. + * `response` will contain information related to this operation. + */ + deleteQueueIfExists(queue: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Permanently deletes the specified queue if it exists. + * + * @function QueueService#deleteQueueIfExists + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * 'true' if the queue was deleted and 'false' if the queue did not exist. + * `response` will contain information related to this operation. + */ + deleteQueueIfExists(queue: string, callback: ErrorOrResult): void; + + /** + * Returns queue properties, including user-defined metadata. + * + * @function QueueService#getQueueMetadata + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the queue information. + * `response` will contain information related to this operation. + */ + getQueueMetadata(queue: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Returns queue properties, including user-defined metadata. + * + * @function QueueService#getQueueMetadata + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the queue information. + * `response` will contain information related to this operation. + */ + getQueueMetadata(queue: string, callback: ErrorOrResult): void; + + /** + * Sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. 
+ * + * @function QueueService#setQueueMetadata + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} metadata The metadata key/value pairs. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the queue information. + * `response` will contain information related to this operation. + */ + setQueueMetadata(queue: string, metadata: { [key: string]: string; }, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. + * + * @function QueueService#setQueueMetadata + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} metadata The metadata key/value pairs. 
+ * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the queue information. + * `response` will contain information related to this operation. + */ + setQueueMetadata(queue: string, metadata: { [key: string]: string; }, callback: ErrorOrResult): void; + + /** + * Adds a new message to the back of the message queue. + * The encoded message can be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size for previous versions. + * Unencoded messages must be in a format that can be included in an XML request with UTF-8 encoding. + * Queue messages are encoded by default. See queueService.messageEncoder to set encoding defaults. + * + * @function QueueService#createMessage + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {string|Buffer} messageText The message text. + * @param {Object} [options] The request options. + * @param {int} [options.messageTimeToLive] The time-to-live interval for the message, in seconds. The maximum time-to-live allowed is 7 days. If this parameter is omitted, the default time-to-live is 7 days + * @param {int} [options.visibilityTimeout] Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value smaller than the time-to-live value. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain + * the message. + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var queueService = azure.createQueueService(); + * queueService.createMessage('taskqueue', 'Hello world!', function(error) { + * if(!error) { + * // Message inserted + * } + * }); + */ + createMessage(queue: string, messageText: string|Buffer, options: QueueService.CreateMessageRequestOptions, callback?: ErrorOrResult): void; + + /** + * Adds a new message to the back of the message queue. + * The encoded message can be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size for previous versions. + * Unencoded messages must be in a format that can be included in an XML request with UTF-8 encoding. + * Queue messages are encoded by default. See queueService.messageEncoder to set encoding defaults. + * + * @function QueueService#createMessage + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {string|Buffer} messageText The message text. 
+ * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var queueService = azure.createQueueService(); + * queueService.createMessage('taskqueue', 'Hello world!', function(error) { + * if(!error) { + * // Message inserted + * } + * }); + */ + createMessage(queue: string, messageText: string|Buffer, callback?: ErrorOrResult): void; + + /** + * Retrieves messages from the queue and makes them invisible to other consumers. + * + * @function QueueService#getMessages + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {int} [options.numOfMessages] A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. By default, a single message is retrieved from the queue with this operation. + * @param {int} [options.visibilityTimeout] Required if not peek only. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. 
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the messages. + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var queueService = azure.createQueueService(); + * var queueName = 'taskqueue'; + * queueService.getMessages(queueName, function(error, serverMessages) { + * if(!error) { + * // Process the message in less than 30 seconds, the message + * // text is available in serverMessages[0].messagetext + * queueService.deleteMessage(queueName, serverMessages[0].messageId, serverMessages[0].popReceipt, function(error) { + * if(!error){ + * // Message deleted + * } + * }); + * } + * }); + */ + getMessages(queue: string, options: QueueService.GetMessagesRequestOptions, callback?: ErrorOrResult): void; + + /** + * Retrieves messages from the queue and makes them invisible to other consumers. + * + * @function QueueService#getMessages + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the messages. + * `response` will contain information related to this operation. 
+ *
+ * @example
+ * var azure = require('azure-storage');
+ * var queueService = azure.createQueueService();
+ * var queueName = 'taskqueue';
+ * queueService.getMessages(queueName, function(error, serverMessages) {
+ * if(!error) {
+ * // Process the message in less than 30 seconds, the message
+ * // text is available in serverMessages[0].messagetext
+ * queueService.deleteMessage(queueName, serverMessages[0].messageId, serverMessages[0].popReceipt, function(error) {
+ * if(!error){
+ * // Message deleted
+ * }
+ * });
+ * }
+ * });
+ */
+ getMessages(queue: string, callback?: ErrorOrResult): void;
+
+ /**
+ * Retrieves a message from the queue and makes it invisible to other consumers.
+ *
+ * @function QueueService#getMessage
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {Object} [options] The request options.
+ * @param {int} [options.visibilityTimeout] Required if not peek only. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the message.
+ * `response` will contain information related to this operation.
+ *
+ * @example
+ * var azure = require('azure-storage');
+ * var queueService = azure.createQueueService();
+ * var queueName = 'taskqueue';
+ * queueService.getMessage(queueName, function(error, serverMessage) {
+ * if(!error) {
+ * // Process the message in less than 30 seconds, the message
+ * // text is available in serverMessage.messagetext
+ * queueService.deleteMessage(queueName, serverMessage.messageId, serverMessage.popReceipt, function(error) {
+ * if(!error){
+ * // Message deleted
+ * }
+ * });
+ * }
+ * });
+ */
+ getMessage(queue: string, options: QueueService.GetMessageRequestOptions, callback?: ErrorOrResult): void;
+
+ /**
+ * Retrieves a message from the queue and makes it invisible to other consumers.
+ *
+ * @function QueueService#getMessage
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the message.
+ * `response` will contain information related to this operation.
+ *
+ * @example
+ * var azure = require('azure-storage');
+ * var queueService = azure.createQueueService();
+ * var queueName = 'taskqueue';
+ * queueService.getMessage(queueName, function(error, serverMessage) {
+ * if(!error) {
+ * // Process the message in less than 30 seconds, the message
+ * // text is available in serverMessage.messagetext
+ * queueService.deleteMessage(queueName, serverMessage.messageId, serverMessage.popReceipt, function(error) {
+ * if(!error){
+ * // Message deleted
+ * }
+ * });
+ * }
+ * });
+ */
+ getMessage(queue: string, callback?: ErrorOrResult): void;
+
+ /**
+ * Retrieves messages from the front of the queue, without changing the messages visibility.
+ *
+ * @function QueueService#peekMessages
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {Object} [options] The request options.
+ * @param {int} [options.numOfMessages] A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. By default, a single message is retrieved from the queue with this operation.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the messages.
+ * `response` will contain information related to this operation.
+ */
+ peekMessages(queue: string, options: QueueService.PeekMessagesRequestOptions, callback?: ErrorOrResult): void;
+
+ /**
+ * Retrieves messages from the front of the queue, without changing the messages visibility.
+ *
+ * @function QueueService#peekMessages
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the messages.
+ * `response` will contain information related to this operation.
+ */
+ peekMessages(queue: string, callback?: ErrorOrResult): void;
+
+ /**
+ * Retrieves a message from the front of the queue, without changing the message visibility.
+ *
+ * @function QueueService#peekMessage
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {Object} [options] The request options.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the message.
+ * `response` will contain information related to this operation.
+ */
+ peekMessage(queue: string, options: common.RequestOptions, callback?: ErrorOrResult): void;
+
+ /**
+ * Retrieves a message from the front of the queue, without changing the message visibility.
+ *
+ * @function QueueService#peekMessage
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the message.
+ * `response` will contain information related to this operation.
+ */
+ peekMessage(queue: string, callback?: ErrorOrResult): void;
+
+ /**
+ * Deletes a specified message from the queue.
+ *
+ * @function QueueService#deleteMessage
+ *
+ * @this {QueueService}
+ * @param {string} queue The queue name.
+ * @param {string} messageId The message identifier of the message to delete.
+ * @param {string} popReceipt A valid pop receipt value returned from an earlier call to the Get Messages or Update Message operation
+ * @param {Object} [options] The request options.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteMessage(queue: string, messageId: string, popReceipt: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + + /** + * Deletes a specified message from the queue. + * + * @function QueueService#deleteMessage + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {string} messageId The message identifier of the message to delete. + * @param {string} popReceipt A valid pop receipt value returned from an earlier call to the Get Messages or Update Message operation + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteMessage(queue: string, messageId: string, popReceipt: string, callback: ErrorOrResponse): void; + + /** + * Clears all messages from the queue. + * + * @function QueueService#clearMessages + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + clearMessages(queue: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + + /** + * Clears all messages from the queue. + * + * @function QueueService#clearMessages + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + clearMessages(queue: string, callback: ErrorOrResponse): void; + + /** + * Updates the visibility timeout of a message. You can also use this operation to update the contents of a message. + * A message must be in a format that can be included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in size. + * + * @function QueueService#updateMessage + * + * @this {QueueService} + * @param {string} queue The queue name. 
+ * @param {string} messageId The message identifier of the message to update. + * @param {string} popReceipt A valid pop receipt value returned from an earlier call to the Get Messages or Update Message operation + * @param {int} visibilityTimeout Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. + * @param {Object} [options] The request options. + * @param {Object} [options.messageText] The new message text. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the message result information. + * `response` will contain information related to this operation. 
+ */ + updateMessage(queue: string, messageId: string, popReceipt: string, visibilityTimeout: number, options: QueueService.UpdateMessageRequestOptions, callback: ErrorOrResult): void; + + /** + * Updates the visibility timeout of a message. You can also use this operation to update the contents of a message. + * A message must be in a format that can be included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in size. + * + * @function QueueService#updateMessage + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {string} messageId The message identifier of the message to update. + * @param {string} popReceipt A valid pop receipt value returned from an earlier call to the Get Messages or Update Message operation + * @param {int} visibilityTimeout Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the message result information. + * `response` will contain information related to this operation. + */ + updateMessage(queue: string, messageId: string, popReceipt: string, visibilityTimeout: number, callback?: ErrorOrResult): void; + + /** + * Gets the queue's ACL. + * + * @function QueueService#getQueueAcl + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the queue. + * `response` will contain information related to this operation. + */ + getQueueAcl(queue: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Gets the queue's ACL. + * + * @function QueueService#getQueueAcl + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the queue. + * `response` will contain information related to this operation. + */ + getQueueAcl(queue: string, callback: ErrorOrResult): void; + + /** + * Updates the queue's ACL. + * + * @function QueueService#setQueueAcl + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} signedIdentifiers The signed identifiers. Signed identifiers must be in an array. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the queue. + * `response` will contain information related to this operation. + * @example + * var azure = require('azure-storage'); + * var SharedAccessPermissions = azure.QueueUtilities.SharedAccessPermissions; + * var queueService = azure.createQueueService(); + * var sharedAccessPolicy = [ + * {AccessPolicy: { + * Permissions: PROCESS, + * Start: startDate, + * Expiry: expiryDate + * }, + * Id: processOnly, + * }, + * {AccessPolicy: { + * Permissions: SharedAccessPermissions.PROCESS + SharedAccessPermissions.DELETE, + * Start: startDate, + * Expiry: expiryDate + * }, + * Id: processAndDelete, + * }]; + * + * queueService.setQueueAcl(queueName, sharedAccessPolicy, function(error, queueResult, response) { + * // do whatever + * }); + */ + setQueueAcl(queue: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, options: common.RequestOptions, callback?: ErrorOrResult): void; + + /** + * Updates the queue's ACL. + * + * @function QueueService#setQueueAcl + * + * @this {QueueService} + * @param {string} queue The queue name. 
+ * @param {Object} signedIdentifiers The signed identifiers. Signed identifiers must be in an array. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the queue. + * `response` will contain information related to this operation. + * @example + * var azure = require('azure-storage'); + * var SharedAccessPermissions = azure.QueueUtilities.SharedAccessPermissions; + * var queueService = azure.createQueueService(); + * var sharedAccessPolicy = [ + * {AccessPolicy: { + * Permissions: PROCESS, + * Start: startDate, + * Expiry: expiryDate + * }, + * Id: processOnly, + * }, + * {AccessPolicy: { + * Permissions: SharedAccessPermissions.PROCESS + SharedAccessPermissions.DELETE, + * Start: startDate, + * Expiry: expiryDate + * }, + * Id: processAndDelete, + * }]; + * + * queueService.setQueueAcl(queueName, sharedAccessPolicy, function(error, queueResult, response) { + * // do whatever + * }); + */ + setQueueAcl(queue: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, callback?: ErrorOrResult): void; + + /** + * Retrieves a shared access signature token. + * + * @function QueueService#generateSharedAccessSignature + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} sharedAccessPolicy The shared access policy. + * @param {string} [sharedAccessPolicy.Id] The signed identifier. + * @param {Object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. + * @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). + * @param {date|string} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired (The UTC value will be used). + * @return {string} The shared access signature query string. Note this string does not contain the leading "?". 
+ */ + generateSharedAccessSignature(queue: string, sharedAccessPolicy: common.SharedAccessPolicy): string; + + /** + * Retrieves a shared access signature token. + * + * @function QueueService#generateSharedAccessSignatureWithVersion + * + * @this {QueueService} + * @param {string} queue The queue name. + * @param {Object} sharedAccessPolicy The shared access policy. + * @param {string} [sharedAccessPolicy.Id] The signed identifier. + * @param {Object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. + * @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). + * @param {date|string} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired (The UTC value will be used). + * @param {string} [sasVersion] An optional string indicating the desired SAS version to use. Value must be 2012-02-12 or later. + * @return {string} The shared access signature query string. Note this string does not contain the leading "?". 
+ */ + generateSharedAccessSignatureWithVersion(queue: string, sharedAccessPolicy: common.SharedAccessPolicy, sasVersion: string): string; + + getUrl(queue: string, sasToken?: string, primary?: boolean): string; + } + + module QueueService { + + export interface ListQueueResult { + entries: QueueResult[]; + continuationToken?: common.ContinuationToken; + } + + export interface QueueMessageResult { + queue?: string; + messageId?: string; + popReceipt?: string; + messageText?: string; + timeNextVisible?: string; + insertionTime?: string; + expirationTime?: string; + dequeueCount?: number; + } + + export interface QueueResult { + name: string; + metadata?: { [key: string]: string; }; + approximateMessageCount?: number; + signedIdentifiers: {[key:string]: common.AccessPolicy}; + exists?: boolean; + created?: boolean; + } + + export interface CreateQueueRequestOptions extends common.RequestOptions { + /** {Object} The metadata key/value pairs. */ + metadata?: { [key: string]: string; }; + } + + export interface ListQueuesRequestOptions extends common.RequestOptions { + /** {string} String value that identifies the portion of the list to be returned with the next list operation. */ + marker?: string; + /** {int} Specifies the maximum number of queues to return per call to Azure storage. This does NOT affect list size returned by this function. (maximum: 5000) */ + maxResults?: number; + /** {string} Include this parameter to specify that the queue's metadata be returned as part of the response body. 
(allowed values: '', 'metadata') */ + include?: string; + } + + export interface PeekMessagesRequestOptions extends common.RequestOptions { + numOfMessages?: number; + } + + export interface GetMessagesRequestOptions extends common.RequestOptions { + numOfMessages?: number; + visibilityTimeout?: number; + } + + export interface GetMessageRequestOptions extends common.RequestOptions { + visibilityTimeout?: number; + } + + export interface UpdateMessageRequestOptions extends common.RequestOptions { + messageText?: string|Buffer; + } + + export interface CreateMessageRequestOptions extends common.RequestOptions { + /** + * {int} The time-to-live interval for the message, in seconds. The maximum time-to-live allowed is 7 days. If this parameter + * is omitted, the default time-to-live is 7 days + */ + messageTimeToLive?: number; + /** + * {int} Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or + * equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than + * the expiry time. visibilitytimeout should be set to a value smaller than the time-to-live value. + */ + visibilityTimeout?: number; + } + } + + export interface QueueMessageEncoder { + encode(input: any) : string; + decode(textToBeDecoded: string) : any; + } + + module QueueMessageEncoder{ + export class TextBase64QueueMessageEncoder implements QueueMessageEncoder { + encode(input: string) : string; + decode(textToBeDecoded: string) : string; + } + + export class BinaryBase64QueueMessageEncoder implements QueueMessageEncoder { + encode(input: Buffer) : string; + decode(textToBeDecoded: string) : Buffer; + } + + export class TextXmlQueueMessageEncoder implements QueueMessageEncoder { + encode(input: string) : string; + decode(textToBeDecoded: string) : string; + } + } + + /** + * Defines enums for use with the Queue service. 
+ * @namespace QueueUtilities + */ + var QueueUtilities: { + /** + * Permission types. + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + READ: string; + ADD: string; + UPDATE: string; + PROCESS: string; + }; + }; + } + + module table { + export interface TableService extends StorageServiceClient { + defaultPayloadFormat: string; + + /** + * Associate a filtering operation with this TableService. Filtering operations + * can include logging, automatically retrying, etc. Filter operations are objects + * that implement a method with the signature: + * + * "function handle (requestOptions, next)". + * + * After doing its preprocessing on the request options, the method needs to call + * "next" passing a callback with the following signature: + * signature: + * + * "function (returnObject, finalCallback, next)" + * + * In this callback, and after processing the returnObject (the response from the + * request to the server), the callback needs to either invoke next if it exists to + * continue processing other filters or simply invoke finalCallback otherwise to end + * up the service invocation. + * + * @function TableService#withFilter + * @param {Object} filter The new filter object. + * @return {TableService} A new service client with the filter applied. + */ + withFilter(newFilter: common.filters.IFilter): TableService; + + /** + * Gets the service stats for a storage account’s Table service. + * + * @this {TableService} + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the properties. + * `response` will contain information related to this operation. + */ + getServiceStats(options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Gets the service stats for a storage account’s Table service. + * + * @this {TableService} + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the properties. + * `response` will contain information related to this operation. + */ + getServiceStats(callback: ErrorOrResult): void; + + /** + * Gets the properties of a storage account’s Table service, including Azure Storage Analytics. + * + * @this {TableService} + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. 
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the properties. + * `response` will contain information related to this operation. + */ + getServiceProperties(options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Gets the properties of a storage account’s Table service, including Azure Storage Analytics. + * + * @this {TableService} + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the properties. + * `response` will contain information related to this operation. + */ + getServiceProperties(callback: ErrorOrResult): void; + + /** + * Sets the properties of a storage account’s Table service, including Azure Storage Analytics. + * You can also use this operation to set the default request version for all incoming requests that do not have a version specified. + * + * @this {TableService} + * @param {Object} serviceProperties The service properties. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.ServiceProperties, options: common.RequestOptions, callback: ErrorOrResponse): void; + + /** + * Sets the properties of a storage account’s Table service, including Azure Storage Analytics. + * You can also use this operation to set the default request version for all incoming requests that do not have a version specified. + * + * @this {TableService} + * @param {Object} serviceProperties The service properties. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.ServiceProperties, callback: ErrorOrResponse): void; + + /** + * Lists a segment containing a collection of table items under the specified account. + * + * @this {TableService} + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The create options or callback function. + * @param {int} [options.maxResults] Specifies the maximum number of tables to return per call to Azure ServiceClient. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listTablesSegmented(currentToken: TableService.ListTablesContinuationToken, options: TableService.ListTablesRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of table items under the specified account. + * + * @this {TableService} + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. 
+ * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listTablesSegmented(currentToken: TableService.ListTablesContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of table items under the specified account. + * + * @this {TableService} + * @param {string} prefix The prefix of the table name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The create options or callback function. + * @param {int} [options.maxResults] Specifies the maximum number of tables to return per call to Azure ServiceClient. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listTablesSegmentedWithPrefix(prefix: string, currentToken: TableService.ListTablesContinuationToken, options: TableService.ListTablesRequestOptions, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of table items under the specified account. + * + * @this {TableService} + * @param {string} prefix The prefix of the table name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listTablesSegmentedWithPrefix(prefix: string, currentToken: TableService.ListTablesContinuationToken, callback: ErrorOrResult): void; + + /** + * Gets the table's ACL. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the ACL information for the table. + * `response` will contain information related to this operation. + */ + getTableAcl(table: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Gets the table's ACL. + * + * @this {TableService} + * @param {string} table The table name. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the ACL information for the table. + * `response` will contain information related to this operation. + */ + getTableAcl(table: string, callback: ErrorOrResult): void; + + /** + * Updates the table's ACL. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} signedIdentifiers The signed identifiers. Signed identifiers must be in an array. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain information for the table. + * `response` will contain information related to this operation. + */ + setTableAcl(table: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, options: common.RequestOptions, callback: ErrorOrResult<{ + TableName: string; + signedIdentifiers: {[key:string]: common.AccessPolicy}; + }>): void; + + /** + * Updates the table's ACL. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} signedIdentifiers The signed identifiers. Signed identifiers must be in an array. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain information for the table. + * `response` will contain information related to this operation. + */ + setTableAcl(table: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, callback: ErrorOrResult<{ + TableName: string; + signedIdentifiers: {[key:string]: common.AccessPolicy}; + }>): void; + + /** + * Retrieves a shared access signature token. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} sharedAccessPolicy The shared access policy. 
+ * @param {string} [sharedAccessPolicy.Id] The signed identifier. + * @param {Object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. + * @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). + * @param {date|string} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired (The UTC value will be used). + * @param {string} [sharedAccessPolicy.AccessPolicy.StartPk] The starting Partition Key for which the SAS will be valid. + * @param {string} [sharedAccessPolicy.AccessPolicy.EndPk] The ending Partition Key for which the SAS will be valid. + * @param {string} [sharedAccessPolicy.AccessPolicy.StartRk] The starting Row Key for which the SAS will be valid. + * @param {string} [sharedAccessPolicy.AccessPolicy.EndRk] The ending Row Key for which the SAS will be valid. + * @return {Object} An object with the shared access signature. + */ + generateSharedAccessSignature(table: string, sharedAccessPolicy: TableService.TableSharedAccessPolicy): string; + + /** + * Retrieves a shared access signature token. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} sharedAccessPolicy The shared access policy. + * @param {string} [sharedAccessPolicy.Id] The signed identifier. + * @param {Object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. + * @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). + * @param {date|string} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired (The UTC value will be used). + * @param {string} [sharedAccessPolicy.AccessPolicy.StartPk] The starting Partition Key for which the SAS will be valid. 
+ * @param {string} [sharedAccessPolicy.AccessPolicy.EndPk] The ending Partition Key for which the SAS will be valid. + * @param {string} [sharedAccessPolicy.AccessPolicy.StartRk] The starting Row Key for which the SAS will be valid. + * @param {string} [sharedAccessPolicy.AccessPolicy.EndRk] The ending Row Key for which the SAS will be valid. + * @param {string} [sasVersion] An optional string indicating the desired SAS version to use. Value must be 2012-02-12 or later. + * @return {Object} An object with the shared access signature. + */ + generateSharedAccessSignatureWithVersion(table: string, sharedAccessPolicy: TableService.TableSharedAccessPolicy, sasVersion: string): string; + + /** + * Checks whether or not a table exists on the service. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain be true if the table exists, or false if the table does not exist. + * `response` will contain information related to this operation. + */ + doesTableExist(table: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Checks whether or not a table exists on the service. + * + * @this {TableService} + * @param {string} table The table name. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain be true if the table exists, or false if the table does not exist. + * `response` will contain information related to this operation. + */ + doesTableExist(table: string, callback: ErrorOrResult): void; + + /** + * Creates a new table within a storage account. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+ * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the new table information. + * `response` will contain information related to this operation. + */ + createTable(table: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Creates a new table within a storage account. + * + * @this {TableService} + * @param {string} table The table name. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the new table information. + * `response` will contain information related to this operation. + */ + createTable(table: string, callback: ErrorOrResult): void; + + /** + * Creates a new table within a storage account if it does not exists. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResult} callback `error` will contain information if an error occurs; + * `result` will be `true` if table was created, false otherwise + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var tableService = azure.createTableService(); + * tableService.createTableIfNotExists('tasktable', function(error) { + * if(!error) { + * // Table created or exists + * } + * }); + */ + createTableIfNotExists(table: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Creates a new table within a storage account if it does not exists. + * + * @this {TableService} + * @param {string} table The table name. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * `result` will be `true` if table was created, false otherwise + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var tableService = azure.createTableService(); + * tableService.createTableIfNotExists('tasktable', function(error) { + * if(!error) { + * // Table created or exists + * } + * }); + */ + createTableIfNotExists(table: string, callback: ErrorOrResult): void; + + /** + * Deletes a table from a storage account. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteTable(table: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + + /** + * Deletes a table from a storage account. + * + * @this {TableService} + * @param {string} table The table name. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteTable(table: string, callback: ErrorOrResponse): void; + + /** + * Deletes a table from a storage account, if it exists. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. 
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * `result` will be `true` if table was deleted, false otherwise + * `response` will contain information related to this operation. + */ + deleteTableIfExists(table: string, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Deletes a table from a storage account, if it exists. + * + * @this {TableService} + * @param {string} table The table name. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * `result` will be `true` if table was deleted, false otherwise + * `response` will contain information related to this operation. + */ + deleteTableIfExists(table: string, callback: ErrorOrResult): void; + + /** + * Queries data in a table. To retrieve a single entity by partition key and row key, use retrieve entity. + * + * @this {TableService} + * @param {string} table The table name. + * @param {TableQuery} tableQuery The query to perform. Use null, undefined, or new TableQuery() to get all of the entities in the table. + * @param {Object} currentToken A continuation token returned by a previous listing operation. + * Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {bool} [options.autoResolveProperties] If true, guess at all property types. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {Function(entity)} [options.entityResolver] The entity resolver. Given a single entity returned by the query, returns a modified object which is added to + * the entities array. + * @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, + * and the property Edm type if given by the service, returns the Edm type of the property. + * @param {TableService~queryResponse} callback `error` will contain information if an error occurs; + * otherwise `entities` will contain the entities returned by the query. + * If more matching entities exist, and could not be returned, + * `queryResultContinuation` will contain a continuation token that can be used + * to retrieve the next set of results. + * `response` will contain information related to this operation. + * + * The logic for returning entity types can get complicated. 
Here is the algorithm used: + * ``` + * var propertyType; + * + * if (propertyResovler) { // If the caller provides a propertyResolver in the options, use it + * propertyType = propertyResolver(partitionKey, rowKey, propertyName, propertyValue, propertyTypeFromService); + * } else if (propertyTypeFromService) { // If the service provides us a property type, use it. See below for an explanation of when this will and won't occur. + * propertyType = propertyTypeFromService; + * } else if (autoResolveProperties) { // If options.autoResolveProperties is set to true + * if (javascript type is string) { // See below for an explanation of how and why autoResolveProperties works as it does. + * propertyType = 'Edm.String'; + * } else if (javascript type is boolean) { + * propertyType = 'Edm.Boolean'; + * } + * } + * + * if (propertyType) { + * // Set the property type on the property. + * } else { + * // Property gets no EdmType. + * } + * ``` + * Notes: + * + * * The service only provides a type if JsonFullMetadata or JsonMinimalMetadata is used, and if the type is Int64, Guid, Binary, or DateTime. + * * Explanation of autoResolveProperties: + * * String gets correctly resolved to 'Edm.String'. + * * Int64, Guid, Binary, and DateTime all get resolved to 'Edm.String.' This only happens if JsonNoMetadata is used (otherwise the service will provide the propertyType in a prior step). + * * Boolean gets correctly resolved to 'Edm.Boolean'. + * * For both Int32 and Double, no type information is returned, even in the case of autoResolveProperties = true. This is due to an + * inability to distinguish between the two in certain cases. 
+ * + * @example + * var azure = require('azure-storage'); + * var tableService = azure.createTableService(); + * // tasktable should already exist and have entities + * + * // returns all entities in tasktable, and a continuation token for the next page of results if necessary + * tableService.queryEntities('tasktable', null, null \/*currentToken*\/, function(error, result) { + * if(!error) { + * var entities = result.entities; + * // do stuff with the returned entities if there are any + * } + * }); + * + * // returns field1 and field2 of the entities in tasktable, and a continuation token for the next page of results if necessary + * var tableQuery = new TableQuery().select('field1', 'field2'); + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result) { + * if(!error) { + * var entities = result.entities; + * // do stuff with the returned entities if there are any + * } + * }); + */ + queryEntities(table: string, tableQuery: TableQuery, currentToken: TableService.TableContinuationToken, options: TableService.TableEntityRequestOptions, callback: ErrorOrResult>): void; + + /** + * Queries data in a table. To retrieve a single entity by partition key and row key, use retrieve entity. + * + * @this {TableService} + * @param {string} table The table name. + * @param {TableQuery} tableQuery The query to perform. Use null, undefined, or new TableQuery() to get all of the entities in the table. + * @param {Object} currentToken A continuation token returned by a previous listing operation. + * Please use 'null' or 'undefined' if this is the first operation. + * @param {TableService~queryResponse} callback `error` will contain information if an error occurs; + * otherwise `entities` will contain the entities returned by the query. + * If more matching entities exist, and could not be returned, + * `queryResultContinuation` will contain a continuation token that can be used + * to retrieve the next set of results. 
+ * `response` will contain information related to this operation. + */ + queryEntities(table: string, tableQuery: TableQuery, currentToken: TableService.TableContinuationToken, callback: ErrorOrResult>): void; + + /** + * Retrieves an entity from a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {string} partitionKey The partition key. + * @param {string} rowKey The row key. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {bool} [options.autoResolveProperties] If true, guess at all property types. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, + * and the property Edm type if given by the service, returns the Edm type of the property. + * @param {Function(entity)} [options.entityResolver] The entity resolver. 
Given the single entity returned by the query, returns a modified object. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will be the matching entity. + * `response` will contain information related to this operation. + * + * The logic for returning entity types can get complicated. Here is the algorithm used: + * ``` + * var propertyType; + * + * if (propertyResovler) { // If the caller provides a propertyResolver in the options, use it + * propertyType = propertyResolver(partitionKey, rowKey, propertyName, propertyValue, propertyTypeFromService); + * } else if (propertyTypeFromService) { // If the service provides us a property type, use it. See below for an explanation of when this will and won't occur. + * propertyType = propertyTypeFromService; + * } else if (autoResolveProperties) { // If options.autoResolveProperties is set to true + * if (javascript type is string) { // See below for an explanation of how and why autoResolveProperties works as it does. + * propertyType = 'Edm.String'; + * } else if (javascript type is boolean) { + * propertyType = 'Edm.Boolean'; + * } + * } + * + * if (propertyType) { + * // Set the property type on the property. + * } else { + * // Property gets no EdmType. + * } + * ``` + * Notes: + * + * * The service only provides a type if JsonFullMetadata or JsonMinimalMetadata is used, and if the type is Int64, Guid, Binary, or DateTime. + * * Explanation of autoResolveProperties: + * * String gets correctly resolved to 'Edm.String'. + * * Int64, Guid, Binary, and DateTime all get resolved to 'Edm.String.' This only happens if JsonNoMetadata is used (otherwise the service will provide the propertyType in a prior step). + * * Boolean gets correctly resolved to 'Edm.Boolean'. + * * For both Int32 and Double, no type information is returned, even in the case of autoResolveProperties = true. This is due to an + * inability to distinguish between the two in certain cases. 
+ * + * @example + * var azure = require('azure-storage'); + * var tableService = azure.createTableService(); + * tableService.retrieveEntity('tasktable', 'tasksSeattle', '1', function(error, serverEntity) { + * if(!error) { + * // Entity available in serverEntity variable + * } + * }); + */ + retrieveEntity(table: string, partitionKey: string, rowKey: string, options: TableService.TableEntityRequestOptions, callback: ErrorOrResult): void; + + /** + * Retrieves an entity from a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {string} partitionKey The partition key. + * @param {string} rowKey The row key. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {bool} [options.autoResolveProperties] If true, guess at all property types. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. 
Given the partition key, row key, property name, property value, + * and the property Edm type if given by the service, returns the Edm type of the property. + * @param {Function(entity)} [options.entityResolver] The entity resolver. Given the single entity returned by the query, returns a modified object. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will be the matching entity. + * `response` will contain information related to this operation. + */ + retrieveEntity(table: string, partitionKey: string, rowKey: string, callback: ErrorOrResult): void; + + /** + * Inserts a new entity into a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. + * @param {bool} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. + * @param {string} [options.payloadFormat] The payload format to use in the response, if options.echoContent is true. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Only applied if echoContent is true. Given the partition key, row key, property name, + * property value, and the property Edm type if given by the service, returns the Edm type of the property. + * @param {Function(entity)} [options.entityResolver] The entity resolver. Only applied if echoContent is true. Given the single entity returned by the insert, returns + * a modified object. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var tableService = azure.createTableService(); + * var task1 = { + * PartitionKey : {'_': 'tasksSeattle', '$':'Edm.String'}, + * RowKey: {'_': '1', '$':'Edm.String'}, + * Description: {'_': 'Take out the trash', '$':'Edm.String'}, + * DueDate: {'_': new Date(2011, 12, 14, 12), '$':'Edm.DateTime'} + * }; + * tableService.insertEntity('tasktable', task1, function(error) { + * if(!error) { + * // Entity inserted + * } + * }); + */ + insertEntity(table: string, entityDescriptor: T, options: TableService.InsertEntityRequestOptions, callback: ErrorOrResult): void; + + /** + * Inserts a new entity into a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + insertEntity(table: string, entityDescriptor: T, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Inserts a new entity into a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + insertEntity(table: string, entityDescriptor: T, callback: ErrorOrResult): void; + + /** + * Inserts or updates a new entity into a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + insertOrReplaceEntity(table: string, entityDescriptor: T, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Inserts or updates a new entity into a table. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + insertOrReplaceEntity(table: string, entityDescriptor: T, callback: ErrorOrResult): void; + + /** + * Replaces an existing entity within a table. To replace conditionally based on etag, set entity['.metadata']['etag']. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. 
+ * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + replaceEntity(table: string, entityDescriptor: T, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Replaces an existing entity within a table. To replace conditionally based on etag, set entity['.metadata']['etag']. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + replaceEntity(table: string, entityDescriptor: T, callback: ErrorOrResult): void; + + /** + * Updates an existing entity within a table by merging new property values into the entity. To merge conditionally based on etag, set entity['.metadata']['etag']. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * response` will contain information related to this operation. + */ + mergeEntity(table: string, entityDescriptor: T, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Updates an existing entity within a table by merging new property values into the entity. To merge conditionally based on etag, set entity['.metadata']['etag']. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * response` will contain information related to this operation. + */ + mergeEntity(table: string, entityDescriptor: T, callback: ErrorOrResult): void; + + /** + * Inserts or updates an existing entity within a table by merging new property values into the entity. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + insertOrMergeEntity(table: string, entityDescriptor: T, options: common.RequestOptions, callback: ErrorOrResult): void; + + /** + * Inserts or updates an existing entity within a table by merging new property values into the entity. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the entity information. + * `response` will contain information related to this operation. + */ + insertOrMergeEntity(table: string, entityDescriptor: T, callback: ErrorOrResult): void; + + /** + * Deletes an entity within a table. To delete conditionally based on etag, set entity['.metadata']['etag']. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {Object} [options] The request options. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteEntity(table: string, entityDescriptor: T, options: common.RequestOptions, callback: ErrorOrResponse): void; + + /** + * Deletes an entity within a table. To delete conditionally based on etag, set entity['.metadata']['etag']. + * + * @this {TableService} + * @param {string} table The table name. + * @param {Object} entityDescriptor The entity descriptor. + * @param {errorOrResponse} callback `error` will contain information if an error occurs; + * `response` will contain information related to this operation. + */ + deleteEntity(table: string, entityDescriptor: T, callback: ErrorOrResponse): void; + + /** + * Executes the operations in the batch. + * + * @this {TableService} + * @param {string} table The table name. + * @param {TableBatch} batch The table batch to execute. 
+ * @param {Object} [options] The create options or callback function. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain responses for each operation executed in the batch; + * `result.entity` will contain the entity information for each operation executed. + * `result.response` will contain the response for each operations executed. + * `response` will contain information related to this operation. + */ + executeBatch(table: string, batch: TableBatch, options: TableService.TableEntityRequestOptions, callback: ErrorOrResult): void; + + /** + * Executes the operations in the batch. + * + * @this {TableService} + * @param {string} table The table name. + * @param {TableBatch} batch The table batch to execute. 
+ * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain responses for each operation executed in the batch; + * `result.entity` will contain the entity information for each operation executed. + * `result.response` will contain the response for each operations executed. + * `response` will contain information related to this operation. + */ + executeBatch(table: string, batch: TableBatch, callback: ErrorOrResult): void; + + getUrl(table: string, sasToken?: string, primary?: boolean): string; + } + + export module TableService { + + export interface TableResult { + isSuccessful?: boolean; + statusCode?: string | number; + TableName?: string; + exists?: boolean; + created?: boolean; + } + + export interface TableAccessPolicy extends common.AccessPolicy { + StartPk?: string; + EndPk?: string; + StartRk?: string; + EndRk?: string; + } + + export interface TableSharedAccessPolicy { + /** The signed identifier. */ + Id?: string; + /** The Table Access Policy information */ + AccessPolicy: TableAccessPolicy; + } + + export interface ListTablesRequestOptions extends common.RequestOptions { + maxResults?: number; + payloadFormat?: string; + } + + export interface ListTablesContinuationToken { + nextTableName: string; + targetLocation?: Constants.StorageLocation; + } + + export interface ListTablesResponse { + continuationToken: ListTablesContinuationToken; + entries: string[]; + } + + export interface TableContinuationToken { + nextPartitionKey: string; + nextRowKey: string; + targetLocation: Constants.StorageLocation; + } + + export interface GetTableAclResult { + signedIdentifiers: {[key:string]: common.AccessPolicy}; + } + + export interface QueryEntitiesResult { + entries: T[]; + continuationToken?: TableContinuationToken; + } + + export interface EntityMetadata { + '.metadata': { etag: string; } + } + + export interface PropertyResolver { + (partitionKey: string, rowKey: string, propertyName: string, 
propertyValue: Object, entityPropertyType: string): string; + } + + export interface TableEntityRequestOptions extends common.RequestOptions { + payloadFormat?: string; + autoResolveProperties?: boolean; + propertyResolver?: PropertyResolver; + entityResolver?: (entityResult: Object) => Object; + echoContent?: boolean; + } + + export interface InsertEntityRequestOptions extends TableEntityRequestOptions { + echoContent: boolean; + } + + export interface BatchResponse { + statusCode?: number; + headers?: Object; + body?: Object; + isSuccessful?: boolean; + } + + export interface BatchResult { + entity?: Object; + error?: Error; + response: BatchResponse; + } + + export interface EntityProperty { + _: T; + $: string; + } + } + + export var TableService: { + /** + * Creates a new TableService object. + * If no connection string or storageaccount and storageaccesskey are provided, + * the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. + * @class + * The TableService object allows you to peform management operations with the Microsoft Azure Table Service. + * The Table Service stores data in rows of key-value pairs. A table is composed of multiple rows, and each row + * contains key-value pairs. There is no schema, so each row in a table may store a different set of keys. + * + * For more information on the Table Service, as well as task focused information on using it from a Node.js application, see + * [How to Use the Table Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-table-storage/). + * The following defaults can be set on the Table service. + * defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Table service. + * defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Table service. 
+ * defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Table service. + * defaultLocationMode The default location mode for requests made via the Table service. + * defaultPayloadFormat The default payload format for requests made via the Table service. + * useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Table service.; true to use the + * Nagle algorithm; otherwise, false. The default value is false. + * enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use + * http(s).Agent({keepAlive:true}). + * @constructor + * @extends {StorageServiceClient} + * + * @param {string} [storageAccountOrConnectionString] The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} [sasToken] The Shared Access Signature token. + * @param {string} [endpointSuffix] The endpoint suffix. + */ + new (storageAccountOrConnectionString?: string, storageAccessKey?: string, host?: string|StorageHost, sasToken?: string, endpointSuffix?: string): TableService; + } + + export module TableUtilities { + /** + * Permission types. + * + * @const + * @enum {string} + */ + var SharedAccessPermissions: { + QUERY: string; + ADD: string; + UPDATE: string; + DELETE: string; + }; + + /** + * Payload Format. + * + * @const + * @enum {string} + */ + var PayloadFormat: { + FULL_METADATA: string; + MINIMAL_METADATA: string; + NO_METADATA: string; + }; + + /** + * Defines the set of Boolean operators for constructing queries. 
+ * + * @const + * @enum {string} + */ + var TableOperators: { + AND: string; + NOT: string; + OR: string; + }; + + /** + * Filter property comparison operators. + * + * @const + * @enum {string} + */ + var QueryComparisons: { + EQUAL: string; + NOT_EQUAL: string; + GREATER_THAN: string; + GREATER_THAN_OR_EQUAL: string; + LESS_THAN: string; + LESS_THAN_OR_EQUAL: string; + }; + + /** + * Edm types. + * + * @const + * @enum {string} + */ + var EdmType: { + STRING: string; + BINARY: string; + INT64: string; + INT32: string; + DOUBLE: string; + DATETIME: string; + GUID: string; + BOOLEAN: string; + }; + + /** + * A helper to create table entities. + * + * @example + * var entGen = TableUtilities.entityGenerator; + * var entity = { PartitionKey: entGen.String('part2'), + * RowKey: entGen.String('row1'), + * boolValue: entGen.Boolean(true), + * intValue: entGen.Int32(42), + * dateValue: entGen.DateTime(new Date(Date.UTC(2011, 10, 25))), + * }; + */ + module entityGenerator { + class EntityProperty { + _: T; + $: string; + constructor(value: T, type?: string); + } + function Int32(value: number|string): EntityProperty; + function Int64(value: number|string): EntityProperty; + function Binary(value: Buffer|string): EntityProperty; + function Boolean(value: boolean|string): EntityProperty; + function String(value: string): EntityProperty; + function Guid(value: string|Buffer|any): EntityProperty; + function Double(value: number|string): EntityProperty; + function DateTime(value: Date|string): EntityProperty; + } + } + + export interface TableQuery { + /** + * Specifies the select clause. If no arguments are given, all fields will be selected. + * + * @param {array} fields The fields to be selected. + * @return {TableQuery} A table query object with the select clause. + * @example + * var tableQuery = new TableQuery().select('field1', 'field2'); + */ + select(...args: string[]): TableQuery; + select(args: string[]): TableQuery; + + /** + * Specifies the top clause. 
+ * + * @param {int} top The number of items to fetch. + * @return {TableQuery} A table query object with the top clause. + * @example + * var tableQuery = new TableQuery().top(10); + * + * // tasktable should already exist and have entities + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result) { + * if(!error) { + * var entities = result.entities; // there will be 10 or less entities + * // do stuff with the returned entities if there are any + * // if result.continuationToken exists, to get the next 10 (or less) entities + * // call queryEntities as above, but with the returned token instead of null + * } + * }); + */ + top(top: number): TableQuery; + + /** + * Specifies the where clause. + * + * Valid type specifier strings include: ?string?, ?bool?, ?int32?, ?double?, ?date?, ?guid?, ?int64?, ?binary? + * A type must be specified for guid, int64, and binaries or the filter produced will be incorrect. + * + * @param {string} condition The condition string. + * @param {string|array} value Value(s) to insert in question mark (?) parameters. + * @return {TableQuery} A table query object with the where clause. + * @example + * var tableQuery = new TableQuery().where(TableQuery.guidFilter('GuidField', QueryComparisons.EQUAL, guidVal)); + * OR + * var tableQuery = new TableQuery().where('Name == ? or Name <= ?', name1, name2); + * OR + * var tableQuery = new TableQuery().where('Name == ?string? && Value == ?int64?, name1, int64Val); + * + * // tasktable should already exist and have entities + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result, response) { + * if(!error) { + * var entities = result.entities; + * // do stuff with the returned entities if there are any + * } + * }); + */ + where(condition: string, ...args: any[]): TableQuery; + + /** + * Specifies an AND where condition. + * + * @param {string} condition The condition string. 
+ * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the and clause. + * @example + * var tableQuery = new TableQuery() + * .where('Name == ? or Name <= ?', 'Person1', 'Person2'); + * .and('Age >= ?', 18); + */ + and(condition: string, ...args: any[]): TableQuery; + + /** + * Specifies an OR where condition. + * + * @param {string} condition The condition. + * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the or clause. + * @example + * var tableQuery = new TableQuery() + * .where('Name == ? or Name <= ?', 'Person1', 'Person2'); + * .or('Age >= ?', 18); + */ + or(condition: string, ...args: any[]): TableQuery; + + /** + * Returns the query string object for the query. + * + * @return {Object} JSON object representing the query string arguments for the query. + */ + toQueryObject(): Object; + } + + export var TableQuery: { + new(): TableQuery; + + /** + * Generates a property filter condition string for an 'int' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|int} value An 'int' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.int32Filter('IntField', QueryComparisons.EQUAL, 5); + */ + int32Filter(propertyName: string, operation: string, value: string | number): string; + + /** + * Generates a property filter condition string for a 'int64' value. + * + * @param {string} propertyName A string containing the name of the property to compare. 
+ * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|int64} value An 'int64' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.int64Filter('Int64Field', QueryComparisons.EQUAL, 123); + */ + int64Filter(propertyName: string, operation: string, value: string | number): string; + + /** + * Generates a property filter condition string for a 'double' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|double}value A 'double' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.doubleFilter('DoubleField', QueryComparisons.EQUAL, 123.45); + */ + doubleFilter(propertyName: string, operation: string, value: string | number): string; + + /** + * Generates a property filter condition string for a 'boolean' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|boolean} value A 'boolean' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. 
+ * @example + * var query = TableQuery.booleanFilter('BooleanField', QueryComparisons.EQUAL, false); + */ + booleanFilter(propertyName: string, operation: string, value: boolean | string): string; + + /** + * Generates a property filter condition string for a 'datetime' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|date} value A 'datetime' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.dateFilter('DateTimeField', QueryComparisons.EQUAL, new Date(Date.UTC(2001, 1, 3, 4, 5, 6))); + */ + dateFilter(propertyName: string, operation: string, value: Date | string): string; + + /** + * Generates a property filter condition string for a 'guid' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|guid} value A 'guid' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.guidFilter('GuidField', QueryComparisons.EQUAL, guid.v1()); + */ + guidFilter(propertyName: string, operation: string, value: string | any): string; + + /** + * Generates a property filter condition string for a 'binary' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. 
@param {string|buffer} value A 'buffer' containing the value to compare with the property.
+ * @example + * var filter1 = TableQuery.stringFilter('Name', QueryComparisons.EQUAL, 'Person'); + * var filter2 = TableQuery.booleanFilter('Visible', QueryComparisons.EQUAL, true); + * var combinedFilter = TableQuery.combineFilters(filter1, TablUtilities.TableOperators.AND, filter2); + */ + combineFilters(filterA: string, operatorString: string, filterB: string): string; + }; + + export interface TableOperation{ + type: string; + entity: Object; + options: common.RequestOptions; + } + + export interface TableBatch { + operations: TableOperation[]; + pk: string; + retrieve: boolean; + + /** + * Removes all of the operations from the batch. + * + */ + clear(): void; + + /** + * Returns a boolean value indicating weather there are operations in the batch. + * + * @return {Boolean} True if there are operations queued up; false otherwise. + */ + hasOperations(): boolean; + + /** + * Returns the number of operations in the batch. + * + * @return {number} The number of operations in the batch. + */ + size(): number; + + /** + * Adds a retrieve operation to the batch. Note that this must be the only operation in the batch. + * + * @param {string} partitionKey The partition key. + * @param {string} rowKey The row key. + * @param {Object} [options] The request options. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, + * and the property Edm type if given by the service, returns the Edm type of the property. + * @param {Function(entity)} [options.entityResolver] The entity resolver. Given the single entity returned by the query, returns a modified object. + */ + retrieveEntity(partitionKey: string, rowKey: string, options?: TableService.TableEntityRequestOptions): void; + + /** + * Adds an insert operation to the batch. + * + * @param {Object} entity The entity. 
+ * @param {Object} [options] The request options. + * @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Inserts only, default to false. + * @param {string} [options.payloadFormat] The payload format to use for the request. + * @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Only applied if echoContent is true. Given the partition key, row key, property name, + * property value, and the property Edm type if given by the service, returns the Edm type of the property. + * @param {Function(entity)} [options.entityResolver] The entity resolver. Only applied if echoContent is true. Given the single entity returned by the insert, returns + * a modified object. + */ + insertEntity(entity: Object, options: TableService.TableEntityRequestOptions): void; + + /** + * Adds a delete operation to the batch. + * + * @param {Object} entity The entity. + */ + deleteEntity(entity: Object): void; + + /** + * Adds a merge operation to the batch. + * + * @param {Object} entity The entity. + */ + mergeEntity(entity: Object): void; + + /** + * Adds an replace operation to the batch. + * + * @param {Object} entity The entity. + */ + replaceEntity(entity: Object): void; + + /** + * Adds an insert or replace operation to the batch. + * + * @param {Object} entity The entity. + */ + insertOrReplaceEntity(entity: Object): void; + + /** + * Adds an insert or merge operation to the batch. + * + * @param {Object} entity The entity. + */ + insertOrMergeEntity(entity: Object): void; + + /** + * Adds an operation to the batch after performing checks. + * + * @param {string} operationType The type of operation to perform. See Constants.TableConstants.Operations + * @param {Object} entity The entity. + * @param {Object} [options] The request options. + */ + addOperation(operationType: string, entity: Object, options?: TableService.TableEntityRequestOptions): void; + + /** + * Gets an operation from the batch. 
Returns null if the index does not exist. + * + * @param {number} index The index in the operations array at which to remove an element. + * @return {Object} The removed operation. + */ + getOperation(index: number): TableOperation; + + /** + * Removes an operation from the batch. Returns null if the index does not exist. + * + * @param {number} index The index in the operations array at which to remove an element. + * @return {Object} The removed operation. + */ + removeOperation(index: number): Object; + } + + export var TableBatch: { + new (): TableBatch; + }; + } + + module file { + export interface FileService extends StorageServiceClient { + defaultEnableReuseSocket: boolean; + singleFileThresholdInBytes: number; + parallelOperationThreadCount: number; + + /** + * Associate a filtering operation with this FileService. Filtering operations + * can include logging, automatically retrying, etc. Filter operations are objects + * that implement a method with the signature: + * + * "function handle (requestOptions, next)". + * + * After doing its preprocessing on the request options, the method needs to call + * "next" passing a callback with the following signature: + * signature: + * + * "function (returnObject, finalCallback, next)" + * + * In this callback, and after processing the returnObject (the response from the + * request to the server), the callback needs to either invoke next if it exists to + * continue processing other filters or simply invoke finalCallback otherwise to end + * up the service invocation. + * + * @function FileService#withFilter + * @param {Object} filter The new filter object. + * @return {FileService} A new service client with the filter applied. + */ + withFilter(newFilter: common.filters.IFilter): FileService; + + /** + * Gets the properties of a storage account's File service, including Azure Storage Analytics. + * + * @this {FileService} + * @param {Object} [options] The request options. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the properties + * and `response` will contain information related to this operation. + */ + getServiceProperties(options: common.RequestOptions, callback: ErrorOrResult): void; + getServiceProperties(callback: ErrorOrResult): void; + + /** + * Sets the properties of a storage account's File service, including Azure Storage Analytics. + * You can also use this operation to set the default request version for all incoming requests that do not have a version specified. + * + * @this {FileService} + * @param {Object} serviceProperties The service properties. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise, `response` + * will contain information related to this operation. + */ + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.ServiceProperties, options: common.RequestOptions, callback: ErrorOrResponse): void; + setServiceProperties(serviceProperties: common.models.ServicePropertiesResult.ServiceProperties, callback: ErrorOrResponse): void; + + /** + * Lists a segment containing a collection of share items under the specified account. + * + * @this {FileService} + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.maxResults] Specifies the maximum number of shares to return per call to Azure storage. 
+ * @param {string} [options.include] Include this parameter to specify that the share's metadata be returned as part of the response body. (allowed values: '', 'metadata', 'snapshots' or any combination of them) + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of shares and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. + */ + listSharesSegmented(currentToken: common.ContinuationToken, options: FileService.ListShareRequestOptions, callback: ErrorOrResult): void; + listSharesSegmented(currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Lists a segment containing a collection of share items whose names begin with the specified prefix under the specified account. + * + * @this {FileService} + * @param {string} prefix The prefix of the share name. + * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. 
+ * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {string} [options.prefix] Filters the results to return only shares whose name begins with the specified prefix. + * @param {int} [options.maxResults] Specifies the maximum number of shares to return per call to Azure storage. + * @param {string} [options.include] Include this parameter to specify that the share's metadata be returned as part of the response body. (allowed values: '', 'metadata', 'snapshots' or any combination of them) + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. + * `entries` gives a list of shares and the `continuationToken` is used for the next listing operation. + * `response` will contain information related to this operation. 
+ */ + listSharesSegmentedWithPrefix(prefix: string, currentToken: common.ContinuationToken, options: FileService.ListShareRequestOptions, callback: ErrorOrResult): void; + listSharesSegmentedWithPrefix(prefix: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void; + + /** + * Checks whether or not a share exists on the service. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the share exists, or false if the share does not exist. + * `response` will contain information related to this operation. 
+ */ + doesShareExist(share: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + doesShareExist(share: string, callback: ErrorOrResult): void; + + /** + * Creates a new share under the specified account. + * If a share with the same name already exists, the operation fails. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {int} [options.quota] Specifies the maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the share information. + * `response` will contain information related to this operation. 
+ */
+ createShare(share: string, options: FileService.CreateShareRequestOptions, callback: ErrorOrResult): void;
+ createShare(share: string, callback: ErrorOrResult): void;
+
+ createShareSnapshot(share: string, options: common.RequestOptions, callback: ErrorOrResult): void;
+ createShareSnapshot(share: string, callback: ErrorOrResult): void;
+
+ /**
+ * Creates a new share under the specified account if the share does not exist.
+ *
+ * @this {FileService}
+ * @param {string}             share                               The share name.
+ * @param {Object}             [options]                           The request options.
+ * @param {int}                [options.quota]                     Specifies the maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120).
+ * @param {LocationMode}       [options.locationMode]              Specifies the location mode used to decide which location the request should be sent to.
+ *                                                                 Please see StorageUtilities.LocationMode for the possible values.
+ * @param {Object}             [options.metadata]                  The metadata key/value pairs.
+ * @param {int}                [options.timeoutIntervalInMs]       The server timeout interval, in milliseconds, to use for the request.
+ * @param {int}                [options.maximumExecutionTimeInMs]  The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ *                                                                 The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ *                                                                 execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string}             [options.clientRequestId]           A string that represents the client request ID with a 1KB character limit.
+ * @param {bool}               [options.useNagleAlgorithm]         Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ *                                                                 The default value is false.
+ * @param {errorOrResult}      callback                            `error` will contain information
+ *                                                                 if an error occurs; otherwise `result` will
+ *                                                                 be true if the share was created, or false if the share
+ *                                                                 already exists.
+ * `response` will contain information related to this operation. + * + * @example + * var azure = require('azure-storage'); + * var FileService = azure.createFileService(); + * FileService.createShareIfNotExists('taskshare', function(error) { + * if(!error) { + * // Share created or already existed + * } + * }); + */ + createShareIfNotExists(share: string, options: FileService.CreateShareRequestOptions, callback: ErrorOrResult): void; + createShareIfNotExists(share: string, callback: ErrorOrResult): void; + + /** + * Retrieves a share and its properties from a specified account. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the share. 
+ * `response` will contain information related to this operation. + */ + getShareProperties(share: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + getShareProperties(share: string, callback: ErrorOrResult): void; + + /** + * Sets the properties for the specified share. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [properties] The share properties to set. + * @param {string|int} [properties.quota] Specifies the maximum size of the share, in gigabytes. + * @param {Object} [options] The request options. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the share. + * `response` will contain information related to this operation. + */ + setShareProperties(share: string, properties: FileService.ShareProperties, options: common.RequestOptions, callback: ErrorOrResult): void; + setShareProperties(share: string, properties: FileService.ShareProperties, callback: ErrorOrResult): void; + + /** + * Gets the share statistics for a share. + * + * @this {FileService} + * @param {string} share The share name. 
+ * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the stats and + * `response` will contain information related to this operation. + */ + getShareStats(share: string, options: common.RequestOptions, callback: ErrorOrResult): void; + getShareStats(share: string, callback: ErrorOrResult): void; + + /** + * Returns all user-defined metadata for the share. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. 
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the share. + * `response` will contain information related to this operation. + */ + getShareMetadata(share: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + getShareMetadata(share: string, callback: ErrorOrResult): void; + + /** + * Sets the share's metadata. + * + * Calling the Set Share Metadata operation overwrites all existing metadata that is associated with the share. + * It's not possible to modify an individual name/value pair. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} metadata The metadata key/value pairs. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + setShareMetadata(share: string, metadata: Map, options: common.RequestOptions, callback: ErrorOrResult): void; + setShareMetadata(share: string, metadata: Map, callback: ErrorOrResult): void; + + /** + * Gets the share's ACL. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.FileUtilities.ShareSnapshotDeleteOptions.*. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the share. + * `response` will contain information related to this operation. + */ + getShareAcl(share: string, options: common.RequestOptions, callback: ErrorOrResult): void; + getShareAcl(share: string, callback: ErrorOrResult): void; + + /** + * Updates the share's ACL. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} signedIdentifiers The signed identifiers. Signed identifiers must be in an array. + * @param {Object} [options] The request options. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the share. + * `response` will contain information related to this operation. + */ + setShareAcl(share: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, options: common.RequestOptions, callback: ErrorOrResult): void; + setShareAcl(share: string, signedIdentifiers: {[key:string]: common.AccessPolicy}, callback: ErrorOrResult): void; + + /** + * Marks the specified share for deletion. + * The share and any files contained within it are later deleted during garbage collection. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.FileUtilities.ShareSnapshotDeleteOptions.*. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. + */ + deleteShare(share: string, options: FileService.DeleteShareOptions, callback: ErrorOrResponse): void; + deleteShare(share: string, callback: ErrorOrResponse): void; + + /** + * Marks the specified share for deletion if it exists. + * The share and any files contained within it are later deleted during garbage collection. + * + * @this {FileService} + * @param {string} share The share name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the share exists and was deleted, or false if the share + * did not exist. + * `response` will contain information related to this operation. 
+ */ + deleteShareIfExists(share: string, options: FileService.DeleteShareOptions, callback: ErrorOrResult): void; + deleteShareIfExists(share: string, callback: ErrorOrResult): void; + + /** + * Checks whether or not a directory exists on the service. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the directory exists, or false if the directory does not exist. + * `response` will contain information related to this operation. 
+ */ + doesDirectoryExist(share: string, directory: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + doesDirectoryExist(share: string, directory: string, callback: ErrorOrResult): void; + + /** + * Creates a new directory under the specified account. + * If a directory with the same name already exists, the operation fails. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the directory information. + * `response` will contain information related to this operation. 
+ */
+ createDirectory(share: string, directory: string, options: FileService.CreateDirectoryRequestOptions, callback: ErrorOrResult): void;
+ createDirectory(share: string, directory: string, callback: ErrorOrResult): void;
+
+ /**
+ * Creates a new directory under the specified account if the directory does not exist.
+ *
+ * @this {FileService}
+ * @param {string}             share                               The share name.
+ * @param {string}             directory                           The directory name.
+ * @param {Object}             [options]                           The request options.
+ * @param {LocationMode}       [options.locationMode]              Specifies the location mode used to decide which location the request should be sent to.
+ *                                                                 Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int}                [options.timeoutIntervalInMs]       The server timeout interval, in milliseconds, to use for the request.
+ * @param {int}                [options.maximumExecutionTimeInMs]  The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ *                                                                 The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ *                                                                 execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string}             [options.clientRequestId]           A string that represents the client request ID with a 1KB character limit.
+ * @param {bool}               [options.useNagleAlgorithm]         Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ *                                                                 The default value is false.
+ * @param {errorOrResult}      callback                            `error` will contain information
+ *                                                                 if an error occurs; otherwise `result` will
+ *                                                                 be true if the directory was created, or false if the directory
+ *                                                                 already exists.
+ *                                                                 `response` will contain information related to this operation.
+ *
+ * @example
+ * var azure = require('azure-storage');
+ * var FileService = azure.createFileService();
+ * FileService.createDirectoryIfNotExists('taskshare', 'taskdirectory', function(error) {
+ *   if(!error) {
+ *     // Directory created or already existed
+ *   }
+ * });
+ */
+ createDirectoryIfNotExists(share: string, directory: string, options: FileService.CreateDirectoryRequestOptions, callback: ErrorOrResult): void;
+ createDirectoryIfNotExists(share: string, directory: string, callback: ErrorOrResult): void;
+
+ /**
+ * Retrieves a directory and its properties from a specified account.
+ *
+ * @this {FileService}
+ * @param {string}             share                               The share name.
+ * @param {string}             directory                           The directory name. Use '' to refer to the base directory.
+ * @param {Object}             [options]                           The request options.
+ * @param {string}             [options.shareSnapshotId]           The snapshot identifier of the share.
+ * @param {LocationMode}       [options.locationMode]              Specifies the location mode used to decide which location the request should be sent to.
+ *                                                                 Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int}                [options.timeoutIntervalInMs]       The server timeout interval, in milliseconds, to use for the request.
+ * @param {int}                [options.maximumExecutionTimeInMs]  The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ *                                                                 The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ *                                                                 execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string}             [options.clientRequestId]           A string that represents the client request ID with a 1KB character limit.
+ * @param {bool}               [options.useNagleAlgorithm]         Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ *                                                                 The default value is false.
+ * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information for the directory. + * `response` will contain information related to this operation. + */ + getDirectoryProperties(share: string, directory: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + getDirectoryProperties(share: string, directory: string, callback: ErrorOrResult): void; + + /** + * Marks the specified directory for deletion. The directory must be empty before it can be deleted. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; otherwise + * `response` will contain information related to this operation. 
+ */ + deleteDirectory(share: string, directory: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + deleteDirectory(share: string, directory: string, callback: ErrorOrResponse): void; + + /** + * Marks the specified directory for deletion if it exists. The directory must be empty before it can be deleted. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the directory exists and was deleted, or false if the directory + * did not exist. + * `response` will contain information related to this operation. 
+ */
+ deleteDirectoryIfExists(share: string, directory: string, options: common.RequestOptions, callback: ErrorOrResult): void;
+ deleteDirectoryIfExists(share: string, directory: string, callback: ErrorOrResult): void;
+
+ /**
+ * Lists a segment containing a collection of file items in the directory.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+ * @param {Object} [options] The request options.
+ * @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+ * @param {int} [options.maxResults] Specifies the maximum number of files to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000)
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * entries.files, entries.directories and the continuationToken for the next listing operation.
+ * `response` will contain information related to this operation.
+ */
+ listFilesAndDirectoriesSegmented(share: string, directory: string, currentToken: common.ContinuationToken, options: FileService.ListRequestOptions, callback: ErrorOrResult): void;
+ listFilesAndDirectoriesSegmented(share: string, directory: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void;
+
+
+ /**
+ * Lists a segment containing a collection of file items in the directory.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} prefix The prefix of the directory/files name.
+ * @param {Object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+ * @param {Object} [options] The request options.
+ * @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+ * @param {int} [options.maxResults] Specifies the maximum number of files to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000)
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * entries.files, entries.directories and the continuationToken for the next listing operation.
+ * `response` will contain information related to this operation.
+ */
+ listFilesAndDirectoriesSegmentedWithPrefix(share: string, directory: string, prefix: string, currentToken: common.ContinuationToken, options: FileService.ListRequestOptions, callback: ErrorOrResult): void;
+ listFilesAndDirectoriesSegmentedWithPrefix(share: string, directory: string, prefix: string, currentToken: common.ContinuationToken, callback: ErrorOrResult): void;
+
+ /**
+ * Returns all user-defined metadata for the specified directory.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {object} [options] The request options.
+ * @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request.
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the file. + * `response` will contain information related to this operation. + */ + getDirectoryMetadata(share: string, directory: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + getDirectoryMetadata(share: string, directory: string, callback: ErrorOrResult): void; + + /** + * Sets user-defined metadata for the specified directory as one or more name-value pairs + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {Object} metadata The metadata key/value pairs. + * @param {Object} [options] The request options. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+ * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information on the file. + * `response` will contain information related to this operation. + */ + setDirectoryMetadata(share: string, directory: string, metadata: Map, options: common.RequestOptions, callback: ErrorOrResult): void; + setDirectoryMetadata(share: string, directory: string, metadata: Map, callback: ErrorOrResult): void; + + /** + * Retrieves a shared access signature token. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} [directory] The directory name. Use '' to refer to the base directory. + * @param {string} [file] The file name. + * @param {Object} sharedAccessPolicy The shared access policy. + * @param {string} [sharedAccessPolicy.Id] The signed identifier. + * @param {Object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. + * @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). + * @param {date|string} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired (The UTC value will be used). + * @param {Object} [headers] The optional header values to set for a file returned wth this SAS. + * @param {string} [headers.cacheControl] The optional value of the Cache-Control response header to be returned when this SAS is used. + * @param {string} [headers.contentType] The optional value of the Content-Type response header to be returned when this SAS is used. + * @param {string} [headers.contentEncoding] The optional value of the Content-Encoding response header to be returned when this SAS is used. + * @param {string} [headers.contentLanguage] The optional value of the Content-Language response header to be returned when this SAS is used. 
+ * @param {string} [headers.contentDisposition] The optional value of the Content-Disposition response header to be returned when this SAS is used.
+ * @return {string} The shared access signature. Note this does not contain the leading "?".
+ */
+ generateSharedAccessSignature(share: string, directory: string, file: string, sharedAccessPolicy: common.SharedAccessPolicy, headers?: common.ContentSettingsHeaders): string;
+
+ /**
+ * Retrieves a shared access signature token.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} [directory] The directory name. Use '' to refer to the base directory.
+ * @param {string} [file] The file name.
+ * @param {Object} sharedAccessPolicy The shared access policy.
+ * @param {string} [sharedAccessPolicy.Id] The signed identifier.
+ * @param {Object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type.
+ * @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used).
+ * @param {date|string} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired (The UTC value will be used).
+ * @param {string} [sasVersion] An optional string indicating the desired SAS version to use. Value must be 2012-02-12 or later.
+ * @param {Object} [headers] The optional header values to set for a file returned with this SAS.
+ * @param {string} [headers.cacheControl] The optional value of the Cache-Control response header to be returned when this SAS is used.
+ * @param {string} [headers.contentType] The optional value of the Content-Type response header to be returned when this SAS is used.
+ * @param {string} [headers.contentEncoding] The optional value of the Content-Encoding response header to be returned when this SAS is used.
+ * @param {string} [headers.contentLanguage] The optional value of the Content-Language response header to be returned when this SAS is used.
+ * @param {string} [headers.contentDisposition] The optional value of the Content-Disposition response header to be returned when this SAS is used. + * @return {string} The shared access signature query string. Note this string does not contain the leading "?". + */ + generateSharedAccessSignatureWithVersion(share: string, directory: string, file: string, sharedAccessPolicy: common.SharedAccessPolicy, sasVersion: string, headers?: common.ContentSettingsHeaders): string; + + /** + * Retrieves a file or directory URL. + * + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} [file] The file name. File names may not start or end with the delimiter '/'. + * @param {string} [sasToken] The Shared Access Signature token. + * @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. + * @param {string} [shareSnapshotId] The snapshot identifier of the share. + * @return {string} The formatted URL string. + * @example + * var azure = require('azure-storage'); + * var FileService = azure.createFileService(); + * var url = FileService.getUrl(shareName, directoryName, fileName, sasToken, true); + */ + getUrl(share: string, directory: string, file?: string, sasToken?: string, primary?: boolean, shareSnapshotId?: string): string; + + /** + * Returns all user-defined metadata, standard HTTP properties, and system properties for the file. + * It does not return or modify the content of the file. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the file. + * `response` will contain information related to this operation. + */ + getFileProperties(share: string, directory: string, file: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + getFileProperties(share: string, directory: string, file: string, callback: ErrorOrResult): void; + + /** + * Returns all user-defined metadata for the specified file. + * It does not modify or return the content of the file. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the file. + * `response` will contain information related to this operation. + */ + getFileMetadata(share: string, directory: string, file: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void; + getFileMetadata(share: string, directory: string, file: string, callback: ErrorOrResult): void; + + /** + * Sets user-defined properties for the specified file. + * It does not modify or return the content of the file. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} [options] The request options. + * @param {string} [options.contentType] The MIME content type of the file. 
The default type is application/octet-stream. + * @param {string} [options.contentEncoding] The content encodings that have been applied to the file. + * @param {string} [options.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentMD5] The file's MD5 hash. + * @param {string} [options.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentDisposition] The file's content disposition. + * @param {string} [options.contentLength] Resizes a file to the specified size. If the specified byte value is less than the current size of the file, + * then all ranges above the specified byte value are cleared. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the file. + * `response` will contain information related to this operation. 
+ */ + setFileProperties(share: string, directory: string, file: string, options: FileService.SetFilePropertiesRequestOptions, callback: ErrorOrResult): void; + setFileProperties(share: string, directory: string, file: string, callback: ErrorOrResult): void; + + /** + * Sets user-defined metadata for the specified file as one or more name-value pairs + * It does not modify or return the content of the file. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} metadata The metadata key/value pairs. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information on the file. + * `response` will contain information related to this operation. 
+ */ + setFileMetadata(share: string, directory: string, file: string, metadata: Map, options: common.RequestOptions, callback: ErrorOrResult): void; + setFileMetadata(share: string, directory: string, file: string, metadata: Map, callback: ErrorOrResult): void; + + /** + * Resizes a file. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {String} size The size of the file, in bytes. + * @param {Object} [options] The request options. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * information about the file. + * `response` will contain information related to this operation. + */ + resizeFile(share: string, directory: string, file: string, size: number, options: common.RequestOptions, callback: ErrorOrResult): void; + resizeFile(share: string, directory: string, file: string, size: number, callback: ErrorOrResult): void; + + /** + * Checks whether or not a file exists on the service. 
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} file The file name. File names may not start or end with the delimiter '/'.
+ * @param {Object} [options] The request options.
+ * @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will
+ * be true if the file exists, or false if the file does not exist.
+ * `response` will contain information related to this operation.
+ */
+ doesFileExist(share: string, directory: string, file: string, options: FileService.FileServiceOptions, callback: ErrorOrResult): void;
+ doesFileExist(share: string, directory: string, file: string, callback: ErrorOrResult): void;
+
+ /**
+ * Creates a file of the specified length.
+ * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {int} length The length of the file in bytes. + * @param {Object} [options] The request options. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {Object} [options.contentSettings] The file's content settings. + * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The file's content disposition. + * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the directory information. + * `response` will contain information related to this operation. + */ + createFile(share: string, directory: string, file: string, length: number, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): void; + createFile(share: string, directory: string, file: string, length: number, callback: ErrorOrResult): void; + + /** + * Marks the specified file for deletion. The file is later deleted during garbage collection. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information + * if an error occurs; `response` will contain information related to this operation. + */ + deleteFile(share: string, directory: string, file: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + deleteFile(share: string, directory: string, file: string, callback: ErrorOrResponse): void; + + /** + * Marks the specified file for deletion if it exists. The file is later deleted during garbage collection. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. 
+ * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will + * be true if the file was deleted, or false if the file + * does not exist. + * `response` will contain information related to this operation. + */ + deleteFileIfExists(share: string, directory: string, file: string, options: common.RequestOptions, callback: ErrorOrResult): void; + deleteFileIfExists(share: string, directory: string, file: string, callback: ErrorOrResult): void; + + /** + * Downloads a file into a text string. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {int} [options.rangeStart] The range start. + * @param {int} [options.rangeEnd] The range end. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. 
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {FileService~FileToText} callback `error` will contain information + * if an error occurs; otherwise `text` will contain the file contents, + * and `file` will contain the file information. + * `response` will contain information related to this operation. + */ + getFileToText(share: string, directory: string, file: string, options: FileService.GetFileRequestOptions, callback: FileService.FileToText): void; + getFileToText(share: string, directory: string, file: string, callback: FileService.FileToText): void; + + /** + * Downloads an Azure file into a file. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {string} localFileName The local path to the file to be downloaded. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. + * @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. + * @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information if an error occurs;
+ * otherwise `result` will contain the file information.
+ * `response` will contain information related to this operation.
+ * @example
+ * var azure = require('azure-storage');
+ * var FileService = azure.createFileService();
+ * FileService.getFileToLocalFile('taskshare', 'taskdirectory', 'task1', 'task1-download.txt', function(error, serverFile) {
+ * if(!error) {
+ * // file available in serverFile.file variable
+ * }
+ * });
+ */
+ getFileToLocalFile(share: string, directory: string, file: string, localFileName: string, options: FileService.GetFileRequestOptions, callback: ErrorOrResult): void;
+ getFileToLocalFile(share: string, directory: string, file: string, localFileName: string, callback: ErrorOrResult): void;
+
+ /**
+ * Provides a stream to read from a file.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} file The file name. File names may not start or end with the delimiter '/'.
+ * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. + * @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. + * @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `result` will contain the file information. + * `response` will contain information related to this operation. + * @return {Readable} A Node.js Readable stream. 
+ * @example + * var azure = require('azure-storage'); + * var fileService = azure.createFileService(); + * var writable = fs.createWriteStream(destinationFileNameTarget); + * fileService.createReadStream(shareName, directoryName, fileName).pipe(writable); + */ + createReadStream(share: string, directory: string, file: string, options: FileService.GetFileRequestOptions, callback: ErrorOrResult): stream.Readable; + createReadStream(share: string, directory: string, file: string, callback: ErrorOrResult): stream.Readable; + + /** + * Downloads a file into a stream. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Writable} writeStream The Node.js Writable stream. + * @param {Object} [options] The request options. + * @param {string} [options.shareSnapshotId] The snapshot identifier of the share. + * @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. + * @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. + * @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. + * @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information if an error occurs;
+ * otherwise `result` will contain the file information.
+ * `response` will contain information related to this operation.
+ *
+ * @example
+ * var azure = require('azure-storage');
+ * var FileService = azure.createFileService();
+ * FileService.getFileToStream('taskshare', 'taskdirectory', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverFile) {
+ * if(!error) {
+ * // file available in serverFile.file variable
+ * }
+ * });
+ */
+ getFileToStream(share: string, directory: string, file: string, writeStream: stream.Writable, options: FileService.GetFileRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+ getFileToStream(share: string, directory: string, file: string, writeStream: stream.Writable, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+
+ /**
+ * Lists file ranges. Lists all of the ranges by default, or only the ranges over a specific range of bytes if rangeStart and rangeEnd are specified.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} file The file name. File names may not start or end with the delimiter '/'.
+ * @param {Object} [options] The request options.
+ * @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+ * @param {int} [options.rangeStart] The range start. + * @param {int} [options.rangeEnd] The range end. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the range information. + * `response` will contain information related to this operation. + */ + listRanges(share: string, directory: string, file: string, options: FileService.ListRangeRequestOptions, callback: ErrorOrResult): void; + listRanges(share: string, directory: string, file: string, callback: ErrorOrResult): void; + + /** + * Clears a range. Clears all of the ranges by default, or only the ranges over a specific range of bytes if rangeStart and rangeEnd are specified. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. 
+ * @param {int} rangeStart The range start.
+ * @param {int} rangeEnd The range end.
+ * @param {Object} [options] The request options.
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; otherwise `result` will contain
+ * the file information.
+ * `response` will contain information related to this operation.
+ */
+ clearRange(share: string, directory: string, file: string, rangeStart: number, rangeEnd: number, options: common.RequestOptions, callback: ErrorOrResult): void;
+ clearRange(share: string, directory: string, file: string, rangeStart: number, rangeEnd: number, callback: ErrorOrResult): void;
+
+ /**
+ * Updates a range from a stream.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} file The file name. File names may not start or end with the delimiter '/'.
+ * @param {Readable} readStream The Node.js Readable stream. + * @param {int} rangeStart The range start. + * @param {int} rangeEnd The range end. + * @param {Object} [options] The request options. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {string} [options.transactionalContentMD5] An optional hash value used to ensure transactional integrity for the page. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the file information. + * `response` will contain information related to this operation. 
+ */ + createRangesFromStream(share: string, directory: string, file: string, readStream: stream.Readable, rangeStart: number, rangeEnd: number, options: FileService.CreateRangeRequestOptions, callback: ErrorOrResult): void; + createRangesFromStream(share: string, directory: string, file: string, readStream: stream.Readable, rangeStart: number, rangeEnd: number, callback: ErrorOrResult): void; + + /** + * Uploads a file from a text string. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {string|object} text The file text, as a string or in a Buffer. + * @param {Object} [options] The request options. + * @param {SpeedSummary} [options.speedSummary] The download tracker objects; + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. + * The default value is false for files. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The file's content settings. + * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The file's content disposition. + * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. 
+ * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+ * Please see StorageUtilities.LocationMode for the possible values.
+ * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+ * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+ * The maximum execution time interval begins at the time that the client begins building the request. The maximum
+ * execution time is checked intermittently while performing requests, and before executing retries.
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+ * The default value is false.
+ * @param {errorOrResult} callback `error` will contain information
+ * if an error occurs; `result` will contain the file information.
+ * `response` will contain information related to this operation.
+ */
+ createFileFromText(share: string, directory: string, file: string, text: string | Buffer, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): void;
+ createFileFromText(share: string, directory: string, file: string, text: string | Buffer, callback: ErrorOrResult): void;
+
+ /**
+ * Uploads a file to storage from a local file.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} file The file name. File names may not start or end with the delimiter '/'.
+ * @param {string} localFileName The local path to the file to be uploaded.
+ * @param {Object} [options] The request options.
+ * @param {SpeedSummary} [options.speedSummary] The download tracker objects; + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. + * The default value is false for files. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The file's content settings. + * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The file's content disposition. + * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+ * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createFileFromLocalFile(share: string, directory: string, file: string, localFileName: string, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createFileFromLocalFile(share: string, directory: string, file: string, localFileName: string, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Uploads a file to storage from an HTML File object. If the file already exists on the service, it will be overwritten. + * (Only available in the JavaScript Client Library for Browsers) + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} browserFile The File object to be uploaded created by HTML File API. + * @param {Object} [options] The request options. + * @param {SpeedSummary} [options.speedSummary] The download tracker objects; + * @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. + * The default value is false for files. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The file's content settings. + * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. 
+ * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The file's content disposition. + * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information if an error occurs; + * otherwise `[result]{@link FileResult}` will contain the file information. + * `response` will contain information related to this operation. 
+ * @return {SpeedSummary}
+ */
+ createFileFromBrowserFile(share: string, directory: string, file: string, browserFile: Object, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+ createFileFromBrowserFile(share: string, directory: string, file: string, browserFile: Object, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary;
+
+ /**
+ * Uploads a file from a stream.
+ *
+ * @this {FileService}
+ * @param {string} share The share name.
+ * @param {string} directory The directory name. Use '' to refer to the base directory.
+ * @param {string} file The file name. File names may not start or end with the delimiter '/'.
+ * @param {Stream} stream Stream to the data to store.
+ * @param {int} streamLength The length of the stream to upload.
+ * @param {Object} [options] The request options.
+ * @param {SpeedSummary} [options.speedSummary] The download tracker objects;
+ * @param {Object} [options.metadata] The metadata key/value pairs.
+ * @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads.
+ * The default value is false for files.
+ * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+ * @param {Object} [options.contentSettings] The file's content settings.
+ * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream.
+ * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file.
+ * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+ * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it.
+ * @param {string} [options.contentSettings.contentDisposition] The file's content disposition.
+ * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {SpeedSummary} + */ + createFileFromStream(share: string, directory: string, file: string, stream: stream.Readable, streamLength: number, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + createFileFromStream(share: string, directory: string, file: string, stream: stream.Readable, streamLength: number, callback: ErrorOrResult): common.streams.speedsummary.SpeedSummary; + + /** + * Provides a stream to write to a file. Assumes that the file exists. + * If it does not, please create the file using createFile before calling this method or use createWriteStreamNewFile. + * Please note the `Stream` returned by this API should be used with piping. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. 
Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {Object} [options] The request options. + * @param {SpeedSummary} [options.speedSummary] The download tracker objects; + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. + * The default value is false for files. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The file's content settings. + * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The file's content disposition. + * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. 
The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {Writable} A Node.js Writable stream. + * @example + * var azure = require('azure-storage'); + * var FileService = azure.createFileService(); + * FileService.createFile(shareName, directoryName, fileName, 1024, function (err) { + * // Pipe file to a file + * var stream = fs.createReadStream(fileNameTarget).pipe(FileService.createWriteStreamToExistingFile(shareName, directoryName, fileName)); + * }); + */ + createWriteStreamToExistingFile(share: string, directory: string, file: string, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): stream.Writable; + createWriteStreamToExistingFile(share: string, directory: string, file: string, callback: ErrorOrResult): stream.Writable; + + /** + * Provides a stream to write to a file. Creates the file before writing data. + * Please note the `Stream` returned by this API should be used with piping. + * + * @this {FileService} + * @param {string} share The share name. + * @param {string} directory The directory name. Use '' to refer to the base directory. + * @param {string} file The file name. File names may not start or end with the delimiter '/'. + * @param {string} length The file length. + * @param {Object} [options] The request options. + * @param {SpeedSummary} [options.speedSummary] The download tracker objects; + * @param {Object} [options.metadata] The metadata key/value pairs. + * @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. 
+ * The default value is false for files. + * @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. + * @param {Object} [options.contentSettings] The file's content settings. + * @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. + * @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. + * @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. + * @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. + * @param {string} [options.contentSettings.contentDisposition] The file's content disposition. + * @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. + * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback The callback function. + * @return {Writable} A Node.js Writable stream. 
+ * @example + * var azure = require('azure-storage'); + * var FileService = azure.createFileService(); + * var stream = fs.createReadStream(fileNameTarget).pipe(FileService.createWriteStreamToNewFile(shareName, directoryName, fileName)); + */ + createWriteStreamToNewFile(share: string, directory: string, file: string, length: number, options: FileService.CreateFileRequestOptions, callback: ErrorOrResult): stream.Writable; + createWriteStreamToNewFile(share: string, directory: string, file: string, length: number, callback: ErrorOrResult): stream.Writable; + + /** + * Starts to copy a file to a destination within the storage account. + * + * @this {FileService} + * @param {string} sourceUri The source file or blob URI. + * @param {string} targetShare The target share name. + * @param {string} targetDirectory The target directory name. + * @param {string} targetFile The target file name. + * @param {Object} [options] The request options. + * @param {Object} [options.metadata] The target file metadata key/value pairs. + * @param {AccessConditions} [options.accessConditions] The access conditions. + * @param {AccessConditions} [options.sourceAccessConditions] The source access conditions. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. 
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResult} callback `error` will contain information + * if an error occurs; otherwise `result` will contain + * the file information. + * `response` will contain information related to this operation. + */ + startCopyFile(sourceUri: string, targetshare: string, targetdirectory: string, targetfile: string, options: FileService.CopyFileRequestOptions, callback: ErrorOrResult): void; + startCopyFile(sourceUri: string, targetshare: string, targetdirectory: string, targetfile: string, callback: ErrorOrResult): void; + + /** + * Abort a file copy operation. + * + * @this {FileService} + * @param {string} share The destination share name. + * @param {string} directory The destination directory name. + * @param {string} file The destination file name. + * @param {string} copyId The copy operation identifier. + * @param {Object} [options] The request options. + * @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. + * Please see StorageUtilities.LocationMode for the possible values. + * @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. + * @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + * The maximum execution time interval begins at the time that the client begins building the request. The maximum + * execution time is checked intermittently while performing requests, and before executing retries. 
+ * @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. + * @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + * The default value is false. + * @param {errorOrResponse} callback `error` will contain information if an error occurs. + * `response` will contain information related to this operation. + */ + abortCopyFile(share: string, directory: string, file: string, copyId: string, options: common.RequestOptions, callback: ErrorOrResponse): void; + abortCopyFile(share: string, directory: string, file: string, copyId: string, callback: ErrorOrResponse): void; + } + + export module FileService { + export interface ListSharesResult { + entries: ShareResult[]; + continuationToken?: common.ContinuationToken + } + + export interface ListFilesAndDirectoriesResult { + entries: { + directories: DirectoryResult[]; + files: FileResult[]; + } + continuationToken?: common.ContinuationToken; + } + + export interface ShareResult { + name: string; + snapshot?: string; + etag: string; + lastModified: string; + metadata?: { [key: string]: string; }; + requestId?: string; + quota?: string; + shareStats? 
: { + shareUsage?: string; + } + exists?: boolean; + created?: boolean; + } + + export interface ShareAclResult extends ShareResult { + signedIdentifiers?: Map + } + + export interface DirectoryResult { + name: string; + etag: string; + lastModified: string; + requestId?: string; + metadata?: { [key: string]: string; }; + serverEncrypted?: string; + exists?: boolean; + created?: boolean; + } + + export interface FileResult { + share: string; + directory: string; + name: string; + etag: string; + lastModified: string; + requestId?: string; + acceptRanges: string; + contentRange: string; + contentLength: string; + metadata?: { [key: string]: string; }; + serverEncrypted?: string; + contentSettings?:{ + contentEncoding: string; + contentLanguage: string; + contentType: string; + cacheControl: string; + contentDisposition: string; + contentMD5: string; + } + copy?: { + id?: string; + source?: string; + status?: string; + completionTime?: string; + statusDescription?: string; + progress?: string; + }; + exists?: boolean; + created?: boolean; + } + + export interface ShareProperties { + quota: number + } + + export interface FileServiceOptions extends common.RequestOptions { + shareSnapshotId?: string; + } + + export interface DeleteShareOptions extends FileServiceOptions { + deleteSnapshots?: string; + } + + export interface ListRangeRequestOptions extends FileServiceOptions { + rangeStart?: number; + rangeEnd?: number; + } + + export interface GetFileRequestOptions extends FileServiceOptions { + parallelOperationThreadCount?: number; + rangeStart?: number; + rangeEnd?: number; + useTransactionalMD5?: boolean; + disableContentMD5Validation?: boolean; + } + + export interface ListRequestOptions extends FileServiceOptions { + maxResults?: number; + include?: string; + } + + export interface ListShareRequestOptions extends common.RequestOptions { + maxResults?: number; + include?: string; + } + + export interface CreateShareRequestOptions extends common.RequestOptions { + 
quota?: string | number; + } + + export interface CreateFileRequestOptions extends common.RequestOptions { + speedsummary?: common.streams.speedsummary.SpeedSummary; + metadata?: { [key: string]: string; }; + contentSettings?: { + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + contentMD5?: string; + cacheControl?: string; + contentDisposition?: string; + }; + useTransactionalMD5?: boolean; + storeFileContentMD5?: boolean; + } + + export interface CopyFileRequestOptions extends common.RequestOptions { + metadata?: { [key: string]: string; }; + accessConditions?: AccessConditions; + sourceAccessConditions?: AccessConditions; + } + + export interface CreateRangeRequestOptions extends common.RequestOptions { + useTransactionalMD5?: boolean; + transactionalContentMD5?: string; + } + + export interface CreateDirectoryRequestOptions extends common.RequestOptions { + metadata?: { [key: string]: string; }; + } + + export interface SetFilePropertiesRequestOptions extends common.RequestOptions { + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + contentMD5?: string; + cacheControl?: string; + contentDisposition?: string; + } + + export interface FileToText { + (error: Error, text: string, result: FileResult, response: ServiceResponse): void + } + } + + export var FileService: { + /** + * Creates a new FileService object. + * If no connection string or storageaccount and storageaccesskey are provided, + * the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. + * @class + * The FileService class is used to perform operations on the Microsoft Azure File Service. + * The File Service provides storage for binary large objects, and provides functions for working with data stored in files. 
+ * + * For more information on the File Service, as well as task focused information on using it in a Node.js application, see + * [How to Use the File Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-file-storage/). + * The following defaults can be set on the file service. + * defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the file service. + * defaultEnableReuseSocket The default boolean value to enable socket reuse when uploading local files or streams. + * If the Node.js version is lower than 0.10.x, socket reuse will always be turned off. + * defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the file service. + * defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the file service. + * defaultLocationMode The default location mode for requests made via the file service. + * parallelOperationThreadCount The number of parallel operations that may be performed when uploading a file. + * useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the file service; true to use the + * Nagle algorithm; otherwise, false. The default value is false. + * enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use + * http(s).Agent({keepAlive:true}). + * @constructor + * @extends {StorageServiceClient} + * + * @param {string} [storageAccountOrConnectionString] The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. 
+ * @param {string} [sasToken] The Shared Access Signature token. + * @param {string} [endpointSuffix] The endpoint suffix. + */ + new (storageAccountOrConnectionString?: string, storageAccessKey?: string, host?: string|StorageHost, sasToken?: string, endpointSuffix?: string): FileService; + } + + // ########################### + // ./services/file/fileutilities + // ########################### + export module FileUtilities { + var SharedAccessPermissions: { + READ: string; + CREATE: string; + WRITE: string; + DELETE: string; + LIST: string; + }; + + var ListingDetails: { + METADATA: string; + }; + + var SharePublicAccessType: { + OFF: string; + SHARE: string; + FILE: string; + }; + + var ShareSnapshotDeleteOptions: { + SHARE_AND_SNAPSHOTS: string + }; + } + } + } + + module common { + module filters { + + export interface IFilter { + handle(requestOptions: common.RequestOptions, next: Next) : void + } + + export interface Next { + (returnedObject: any, finalCallback: Post, nextPostCallback: Post) : void; + } + + export interface Post { + (returnedObject: any) : void; + } + + // ########################### + // ./common/filters/retrypolicyfilter + // ########################### + module retrypolicyfilter { + export interface RetryPolicyFilter extends RetryPolicyFilter.IRetryPolicy { + retryCount: number; + retryInterval: number; + /** + * Creates a new RetryPolicyFilter instance. + * @class + * The RetryPolicyFilter allows you to retry operations, + * using a custom retry policy. Users are responsible to + * define the shouldRetry method. + * To apply a filter to service operations, use `withFilter` + * and specify the filter to be used when creating a service. + * @constructor + * @param {number} [retryCount=30000] The client retry count. + * @param {number} [retryInterval=3] The client retry interval, in milliseconds. 
+ * + * @example + * var azure = require('azure-storage'); + * var retryPolicy = new azure.RetryPolicyFilter(); + * retryPolicy.retryCount = 3; + * retryPolicy.retryInterval = 3000; + * retryPolicy.shouldRetry = function(statusCode, retryRequestOption) { + * + * }; + * var blobService = azure.createBlobService().withFilter(retryPolicy); + */ + constructor(retryCount?: number, retryInterval?: number): RetryPolicyFilter; + + shouldRetry(statusCode: number, retryData: RetryPolicyFilter.IRetryRequestOptions): { + retryInterval: number; + retryable: boolean; + }; + } + export module RetryPolicyFilter { + /** + * Represents the default client retry interval, in milliseconds. + */ + export var DEFAULT_CLIENT_RETRY_INTERVAL: number; + /** + * Represents the default client retry count. + */ + export var DEFAULT_CLIENT_RETRY_COUNT: number; + + export interface IRetryRequestOptions { + retryInterval: number; + locationMode: StorageUtilities.LocationMode; + currentLocation: Constants.StorageLocation; + retryContext: IRetryContext; + } + export interface IRetryContext { + retryCount: number; + error: Error; + retryInterval: number; + locationMode: StorageUtilities.LocationMode; + currentLocation: Constants.StorageLocation; + } + export interface IRetryPolicy extends IFilter { + retryInterval: number; + shouldRetry(statusCode: number, retryData: IRetryRequestOptions): { + retryInterval: number; + retryable: boolean; + }; + } + } + } + + // ########################### + // ./common/filters/linearretrypolicyfilter + // ########################### + module linearretrypolicyfilter { + export class LinearRetryPolicyFilter implements RetryPolicyFilter.IRetryPolicy { + retryCount: number; + retryInterval: number; + /** + * Creates a new LinearRetryPolicyFilter instance. + * @class + * The LinearRetryPolicyFilter allows you to retry operations, + * using an linear back-off interval between retries. 
+ * To apply a filter to service operations, use `withFilter` + * and specify the filter to be used when creating a service. + * @constructor + * @param {number} [retryCount=30000] The client retry count. + * @param {number} [retryInterval=3] The client retry interval, in milliseconds. + * + * @example + * var azure = require('azure-storage'); + * var retryOperations = new azure.LinearRetryPolicyFilter(); + * var blobService = azure.createBlobService().withFilter(retryOperations) + */ + constructor(retryCount?: number, retryInterval?: number); + /** + * Represents the default client retry interval, in milliseconds. + */ + static DEFAULT_CLIENT_RETRY_INTERVAL: number; + /** + * Represents the default client retry count. + */ + static DEFAULT_CLIENT_RETRY_COUNT: number; + /** + * Determines if the operation should be retried and how long to wait until the next retry. + * + * @param {number} statusCode The HTTP status code. + * @param {Object} retryData The retry data. + * @return {retryInfo} Information about whether the operation qualifies for a retry and the retryInterval. + */ + shouldRetry(statusCode: number, retryData: RetryPolicyFilter.IRetryRequestOptions): { + retryInterval: number; + retryable: boolean; + }; + handle(requestOptions: common.RequestOptions, next: Next) : void + } + } + + // ########################### + // ./common/filters/exponentialretrypolicyfilter + // ########################### + module exponentialretrypolicyfilter { + export class ExponentialRetryPolicyFilter implements RetryPolicyFilter.IRetryPolicy { + retryCount: number; + retryInterval: number; + minRetryInterval: number; + maxRetryInterval: number; + /** + * Creates a new 'ExponentialRetryPolicyFilter' instance. + * @class + * The ExponentialRetryPolicyFilter allows you to retry operations, + * using an exponential back-off interval between retries. + * To apply a filter to service operations, use `withFilter` + * and specify the filter to be used when creating a service. 
+ * @constructor + * @param {number} [retryCount=3] The client retry count. + * @param {number} [retryInterval=30000] The client retry interval, in milliseconds. + * @param {number} [minRetryInterval=3000] The minimum retry interval, in milliseconds. + * @param {number} [maxRetryInterval=90000] The maximum retry interval, in milliseconds. + * + * @example + * var azure = require('azure-storage'); + * var retryOperations = new azure.ExponentialRetryPolicyFilter(); + * var blobService = azure.createBlobService().withFilter(retryOperations) + */ + constructor(retryCount?: number, retryInterval?: number, minRetryInterval?: number, maxRetryInterval?: number); + /** + * Represents the default client retry interval, in milliseconds. + */ + static DEFAULT_CLIENT_RETRY_INTERVAL: number; + /** + * Represents the default client retry count. + */ + static DEFAULT_CLIENT_RETRY_COUNT: number; + /** + * Represents the default maximum retry interval, in milliseconds. + */ + static DEFAULT_CLIENT_MAX_RETRY_INTERVAL: number; + /** + * Represents the default minimum retry interval, in milliseconds. + */ + static DEFAULT_CLIENT_MIN_RETRY_INTERVAL: number; + /** + * Determines if the operation should be retried and how long to wait until the next retry. + * + * @param {number} statusCode The HTTP status code. + * @param {Object} retryData The retry data. + * @return {retryInfo} Information about whether the operation qualifies for a retry and the retryInterval. + */ + shouldRetry(statusCode: number, retryData: RetryPolicyFilter.IRetryRequestOptions): { + retryInterval: number; + retryable: boolean; + }; + handle(requestOptions: common.RequestOptions, next: Next) : void + } + } + } + + module util { + + // ########################### + // ./common/util/constants + // ########################### + module constants { + export var USER_AGENT_PRODUCT_NAME: string; + export var USER_AGENT_PRODUCT_VERSION: string; + /** + * The number of default concurrent requests for parallel operation. 
+ * + * @const + * @type {string} + */ + export var DEFAULT_PARALLEL_OPERATION_THREAD_COUNT: number; + /** + * The boolean of default value for enabling reuseSocket. + * + * @const + * @type {bool} + */ + export var DEFAULT_ENABLE_REUSE_SOCKET: boolean; + /** + * Constant representing a kilobyte (Non-SI version). + * + * @const + * @type {string} + */ + export var KB: number; + /** + * Constant representing a megabyte (Non-SI version). + * + * @const + * @type {string} + */ + export var MB: number; + /** + * Constant representing a gigabyte (Non-SI version). + * + * @const + * @type {string} + */ + export var GB: number; + /** + * Specifies HTTP. + * + * @const + * @type {string} + */ + export var HTTP: string; + /** + * Specifies HTTPS. + * + * @const + * @type {string} + */ + export var HTTPS: string; + /** + * Marker for atom metadata. + * + * @const + * @type {string} + */ + export var XML_METADATA_MARKER: string; + /** + * Marker for atom value. + * + * @const + * @type {string} + */ + export var XML_VALUE_MARKER: string; + /** + * Specifies the location used to indicate which location the operation can be performed against. + * + * @const + * @enum + */ + export enum RequestLocationMode { + PRIMARY_ONLY = 0, + SECONDARY_ONLY = 1, + PRIMARY_OR_SECONDARY = 2, + } + /** + * Represents a storage service location. 
+ * + * @const + * @enum + */ + export enum StorageLocation { + PRIMARY = 0, + SECONDARY = 1, + } + + export var AccountSasConstants: { + /** + * Permission types + * + * @const + * @enum {string} + */ + Permissions: { + READ: string, + ADD: string, + CREATE: string, + UPDATE: string, + PROCESS: string, + WRITE: string, + DELETE: string, + LIST: string + }, + + /** + * Services types + * + * @const + * @enum {string} + */ + Services: { + BLOB: string, + FILE: string, + QUEUE: string, + TABLE: string, + }, + + /** + * Resources types + * + * @const + * @enum {string} + */ + Resources: { + SERVICE: string, + CONTAINER: string, + OBJECT: string + }, + + Protocols: { + HTTPSONLY: string, + HTTPSORHTTP: string + }, + }; + + /** + * Defines constants for use with shared access policies. + */ + export var AclConstants: { + ACCESS_POLICY: string; + EXPIRY: string; + ID: string; + PERMISSION: string; + SIGNED_IDENTIFIER_ELEMENT: string; + SIGNED_IDENTIFIERS_ELEMENT: string; + START: string; + }; + /** + * Defines constants for use with service properties. + */ + export var ServicePropertiesConstants: { + STORAGE_SERVICE_PROPERTIES_ELEMENT: string; + DEFAULT_ANALYTICS_VERSION: string; + LOGGING_ELEMENT: string; + VERSION_ELEMENT: string; + DELETE_ELEMENT: string; + READ_ELEMENT: string; + WRITE_ELEMENT: string; + RETENTION_POLICY_ELEMENT: string; + ENABLED_ELEMENT: string; + DAYS_ELEMENT: string; + HOUR_METRICS_ELEMENT: string; + MINUTE_METRICS_ELEMENT: string; + CORS_ELEMENT: string; + CORS_RULE_ELEMENT: string; + ALLOWED_ORIGINS_ELEMENT: string; + ALLOWED_METHODS_ELEMENT: string; + MAX_AGE_IN_SECONDS_ELEMENT: string; + EXPOSED_HEADERS_ELEMENT: string; + ALLOWED_HEADERS_ELEMENT: string; + INCLUDE_APIS_ELEMENT: string; + DEFAULT_SERVICE_VERSION_ELEMENT: string; + DEFAULT_DELETE_RETENTION_POLICY_ELEMENT: string; + }; + /** + * Defines constants for use with blob operations. 
+ */ + export var BlobConstants: { + LATEST_ELEMENT: string; + UNCOMMITTED_ELEMENT: string; + BLOCK_LIST_ELEMENT: string; + COMMITTED_ELEMENT: string; + DEFAULT_WRITE_PAGE_SIZE_IN_BYTES: number; + MIN_WRITE_PAGE_SIZE_IN_BYTES: number; + DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES: number; + DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES: number; + MAX_BLOCK_SIZE: number; + MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES: number; + MAX_RANGE_GET_SIZE_WITH_MD5: number; + MAX_UPDATE_PAGE_SIZE: number; + MAX_QUEUED_WRITE_DISK_BUFFER_SIZE: number; + MAX_SINGLE_GET_PAGE_RANGE_SIZE: number; + PAGE_SIZE: number; + ResourceTypes: { + CONTAINER: string; + BLOB: string; + }; + PageWriteOptions: { + UPDATE: string; + CLEAR: string; + }; + BlobTypes: { + BLOCK: string; + PAGE: string; + }; + LeaseOperation: { + ACQUIRE: string; + RENEW: string; + CHANGE: string; + RELEASE: string; + BREAK: string; + }; + }; + /** + * Defines constants for use with file operations. + */ + export var FileConstants: { + DEFAULT_WRITE_SIZE_IN_BYTES: number; + MAX_RANGE_GET_SIZE_WITH_MD5: number; + MAX_UPDATE_FILE_SIZE: number; + DEFAULT_SINGLE_FILE_GET_THRESHOLD_IN_BYTES: number; + MIN_WRITE_FILE_SIZE_IN_BYTES: number; + RangeWriteOptions: { + UPDATE: string; + CLEAR: string; + }; + }; + /** + * Defines constants for use with queue storage. + * + * @const + * @type {string} + */ + export var QueueConstants: { + QUEUE_MESSAGE_ELEMENT: string; + MESSAGE_TEXT_ELEMENT: string; + }; + /** + * Defines constants for use with table storage. 
+ * + * @const + * @type {string} + */ + export var TableConstants: { + CHANGESET_DELIMITER: string; + BATCH_DELIMITER: string; + CONTINUATION_NEXT_ROW_KEY: string; + CONTINUATION_NEXT_PARTITION_KEY: string; + CONTINUATION_NEXT_TABLE_NAME: string; + NEXT_ROW_KEY: string; + NEXT_PARTITION_KEY: string; + NEXT_TABLE_NAME: string; + ODATA_PREFIX: string; + ODATA_TYPE_SUFFIX: string; + ODATA_METADATA_MARKER: string; + ODATA_VALUE_MARKER: string; + ODATA_TYPE_MARKER: string; + DEFAULT_DATA_SERVICE_VERSION: string; + TABLE_NAME: string; + TABLE_SERVICE_TABLE_NAME: string; + Operations: { + RETRIEVE: string; + INSERT: string; + UPDATE: string; + MERGE: string; + DELETE: string; + INSERT_OR_REPLACE: string; + INSERT_OR_MERGE: string; + }; + }; + /** + * Defines constants for use with HTTP headers. + */ + export var HeaderConstants: { + ACCEPT_RANGES: string; + CONTENT_TRANSFER_ENCODING_HEADER: string; + TRANSFER_ENCODING_HEADER: string; + SERVER_HEADER: string; + LOCATION_HEADER: string; + LAST_MODIFIED: string; + DATA_SERVICE_VERSION: string; + MAX_DATA_SERVICE_VERSION: string; + PREFIX_FOR_STORAGE_HEADER: string; + CLIENT_REQUEST_ID_HEADER: string; + APPROXIMATE_MESSAGES_COUNT: string; + AUTHORIZATION: string; + BLOB_PUBLIC_ACCESS_HEADER: string; + BLOB_TYPE_HEADER: string; + TYPE_HEADER: string; + BLOCK_BLOB: string; + CACHE_CONTROL: string; + BLOB_CACHE_CONTROL_HEADER: string; + CACHE_CONTROL_HEADER: string; + COPY_STATUS: string; + COPY_COMPLETION_TIME: string; + COPY_STATUS_DESCRIPTION: string; + COPY_ID: string; + COPY_PROGRESS: string; + COPY_ACTION: string; + CONTENT_ID: string; + CONTENT_ENCODING: string; + BLOB_CONTENT_ENCODING_HEADER: string; + CONTENT_ENCODING_HEADER: string; + CONTENT_LANGUAGE: string; + BLOB_CONTENT_LANGUAGE_HEADER: string; + CONTENT_LANGUAGE_HEADER: string; + CONTENT_LENGTH: string; + BLOB_CONTENT_LENGTH_HEADER: string; + CONTENT_LENGTH_HEADER: string; + CONTENT_DISPOSITION: string; + BLOB_CONTENT_DISPOSITION_HEADER: string; + 
CONTENT_DISPOSITION_HEADER: string; + CONTENT_MD5: string; + BLOB_CONTENT_MD5_HEADER: string; + CONTENT_MD5_HEADER: string; + CONTENT_RANGE: string; + CONTENT_TYPE: string; + BLOB_CONTENT_TYPE_HEADER: string; + CONTENT_TYPE_HEADER: string; + COPY_SOURCE_HEADER: string; + DATE: string; + DATE_HEADER: string; + DELETE_SNAPSHOT_HEADER: string; + ETAG: string; + IF_MATCH: string; + IF_MODIFIED_SINCE: string; + IF_NONE_MATCH: string; + IF_UNMODIFIED_SINCE: string; + INCLUDE_SNAPSHOTS_VALUE: string; + JSON_CONTENT_TYPE_VALUE: string; + LEASE_ID_HEADER: string; + LEASE_BREAK_PERIOD: string; + PROPOSED_LEASE_ID: string; + LEASE_DURATION: string; + SOURCE_LEASE_ID_HEADER: string; + LEASE_TIME_HEADER: string; + LEASE_STATUS: string; + LEASE_STATE: string; + PAGE_BLOB: string; + PAGE_WRITE: string; + FILE_WRITE: string; + PREFER: string; + PREFER_CONTENT: string; + PREFER_NO_CONTENT: string; + PREFIX_FOR_STORAGE_METADATA: string; + PREFIX_FOR_STORAGE_PROPERTIES: string; + RANGE: string; + RANGE_GET_CONTENT_MD5: string; + RANGE_HEADER_FORMAT: string; + REQUEST_ID_HEADER: string; + SEQUENCE_NUMBER: string; + SEQUENCE_NUMBER_EQUAL: string; + SEQUENCE_NUMBER_LESS_THAN: string; + SEQUENCE_NUMBER_LESS_THAN_OR_EQUAL: string; + SEQUENCE_NUMBER_ACTION: string; + SIZE: string; + SNAPSHOT_HEADER: string; + SNAPSHOTS_ONLY_VALUE: string; + SOURCE_IF_MATCH_HEADER: string; + SOURCE_IF_MODIFIED_SINCE_HEADER: string; + SOURCE_IF_NONE_MATCH_HEADER: string; + SOURCE_IF_UNMODIFIED_SINCE_HEADER: string; + STORAGE_RANGE_HEADER: string; + STORAGE_VERSION_HEADER: string; + TARGET_STORAGE_VERSION: string; + USER_AGENT: string; + POP_RECEIPT_HEADER: string; + TIME_NEXT_VISIBLE_HEADER: string; + APPROXIMATE_MESSAGE_COUNT_HEADER: string; + LEASE_ACTION_HEADER: string; + ACCEPT_HEADER: string; + ACCEPT_CHARSET_HEADER: string; + HOST_HEADER: string; + CORRELATION_ID_HEADER: string; + GROUP_ID_HEADER: string; + }; + export var QueryStringConstants: { + API_VERSION: string; + COMP: string; + RESTYPE: 
string; + COPY_ID: string; + SNAPSHOT: string; + TIMEOUT: string; + SIGNED_START: string; + SIGNED_EXPIRY: string; + SIGNED_RESOURCE: string; + SIGNED_PERMISSIONS: string; + SIGNED_IDENTIFIER: string; + SIGNATURE: string; + SIGNED_VERSION: string; + CACHE_CONTROL: string; + CONTENT_TYPE: string; + CONTENT_ENCODING: string; + CONTENT_LANGUAGE: string; + CONTENT_DISPOSITION: string; + BLOCK_ID: string; + BLOCK_LIST_TYPE: string; + PREFIX: string; + MARKER: string; + MAX_RESULTS: string; + DELIMITER: string; + INCLUDE: string; + PEEK_ONLY: string; + NUM_OF_MESSAGES: string; + POP_RECEIPT: string; + VISIBILITY_TIMEOUT: string; + MESSAGE_TTL: string; + SELECT: string; + FILTER: string; + TOP: string; + SKIP: string; + NEXT_PARTITION_KEY: string; + NEXT_ROW_KEY: string; + LOCK_ID: string; + TABLENAME: string; + STARTPK: string; + STARTRK: string; + ENDPK: string; + ENDRK: string; + }; + export var StorageServiceClientConstants: { + DEFAULT_PROTOCOL: string; + EnvironmentVariables: { + AZURE_STORAGE_ACCOUNT: string; + AZURE_STORAGE_ACCESS_KEY: string; + AZURE_STORAGE_DNS_SUFFIX: string; + AZURE_STORAGE_CONNECTION_STRING: string; + HTTP_PROXY: string; + HTTPS_PROXY: string; + EMULATED: string; + }; + DEVSTORE_STORAGE_ACCOUNT: string; + DEVSTORE_STORAGE_ACCESS_KEY: string; + DEV_STORE_URI: string; + DEVSTORE_DEFAULT_PROTOCOL: string; + DEVSTORE_BLOB_HOST: string; + DEVSTORE_QUEUE_HOST: string; + DEVSTORE_TABLE_HOST: string; + CLOUD_BLOB_HOST: string; + CLOUD_QUEUE_HOST: string; + CLOUD_TABLE_HOST: string; + CLOUD_FILE_HOST: string; + }; + export module HttpConstants { + /** + * Http Verbs + * + * @const + * @enum {string} + */ + var HttpVerbs: { + PUT: string; + GET: string; + DELETE: string; + POST: string; + MERGE: string; + HEAD: string; + }; + /** + * Response codes. 
+ * + * @const + * @enum {int} + */ + enum HttpResponseCodes { + Ok = 200, + Created = 201, + Accepted = 202, + NoContent = 204, + PartialContent = 206, + BadRequest = 400, + Unauthorized = 401, + Forbidden = 403, + NotFound = 404, + Conflict = 409, + LengthRequired = 411, + PreconditionFailed = 412, + } + } + export var VersionConstants: { + AUGUST_2013: string; + FEBRUARY_2012: string; + }; + export var BlobErrorCodeStrings: { + INVALID_BLOCK_ID: string; + BLOB_NOT_FOUND: string; + BLOB_ALREADY_EXISTS: string; + CONTAINER_ALREADY_EXISTS: string; + CONTAINER_NOT_FOUND: string; + INVALID_BLOB_OR_BLOCK: string; + INVALID_BLOCK_LIST: string; + }; + export var FileErrorCodeStrings: { + SHARE_ALREADY_EXISTS: string; + SHARE_NOT_FOUND: string; + FILE_NOT_FOUND: string; + }; + export var QueueErrorCodeStrings: { + QUEUE_NOT_FOUND: string; + QUEUE_DISABLED: string; + QUEUE_ALREADY_EXISTS: string; + QUEUE_NOT_EMPTY: string; + QUEUE_BEING_DELETED: string; + POP_RECEIPT_MISMATCH: string; + INVALID_PARAMETER: string; + MESSAGE_NOT_FOUND: string; + MESSAGE_TOO_LARGE: string; + INVALID_MARKER: string; + }; + export var StorageErrorCodeStrings: { + UNSUPPORTED_HTTP_VERB: string; + MISSING_CONTENT_LENGTH_HEADER: string; + MISSING_REQUIRED_HEADER: string; + MISSING_REQUIRED_XML_NODE: string; + UNSUPPORTED_HEADER: string; + UNSUPPORTED_XML_NODE: string; + INVALID_HEADER_VALUE: string; + INVALID_XML_NODE_VALUE: string; + MISSING_REQUIRED_QUERY_PARAMETER: string; + UNSUPPORTED_QUERY_PARAMETER: string; + INVALID_QUERY_PARAMETER_VALUE: string; + OUT_OF_RANGE_QUERY_PARAMETER_VALUE: string; + INVALID_URI: string; + INVALID_HTTP_VERB: string; + EMPTY_METADATA_KEY: string; + REQUEST_BODY_TOO_LARGE: string; + INVALID_XML_DOCUMENT: string; + INTERNAL_ERROR: string; + AUTHENTICATION_FAILED: string; + MD5_MISMATCH: string; + INVALID_MD5: string; + OUT_OF_RANGE_INPUT: string; + INVALID_INPUT: string; + OPERATION_TIMED_OUT: string; + RESOURCE_NOT_FOUND: string; + RESOURCE_ALREADY_EXISTS: string; 
+ INVALID_METADATA: string; + METADATA_TOO_LARGE: string; + CONDITION_NOT_MET: string; + UPDATE_CONDITION_NOT_SATISFIED: string; + INVALID_RANGE: string; + CONTAINER_NOT_FOUND: string; + CONTAINER_ALREADY_EXISTS: string; + CONTAINER_DISABLED: string; + CONTAINER_BEING_DELETED: string; + SERVER_BUSY: string; + }; + export var TableErrorCodeStrings: { + XMETHOD_NOT_USING_POST: string; + XMETHOD_INCORRECT_VALUE: string; + XMETHOD_INCORRECT_COUNT: string; + TABLE_HAS_NO_PROPERTIES: string; + DUPLICATE_PROPERTIES_SPECIFIED: string; + TABLE_HAS_NO_SUCH_PROPERTY: string; + DUPLICATE_KEY_PROPERTY_SPECIFIED: string; + TABLE_ALREADY_EXISTS: string; + TABLE_NOT_FOUND: string; + ENTITY_NOT_FOUND: string; + ENTITY_ALREADY_EXISTS: string; + PARTITION_KEY_NOT_SPECIFIED: string; + OPERATOR_INVALID: string; + UPDATE_CONDITION_NOT_SATISFIED: string; + PROPERTIES_NEED_VALUE: string; + PARTITION_KEY_PROPERTY_CANNOT_BE_UPDATED: string; + TOO_MANY_PROPERTIES: string; + ENTITY_TOO_LARGE: string; + PROPERTY_VALUE_TOO_LARGE: string; + INVALID_VALUE_TYPE: string; + TABLE_BEING_DELETED: string; + TABLE_SERVER_OUT_OF_MEMORY: string; + PRIMARY_KEY_PROPERTY_IS_INVALID_TYPE: string; + PROPERTY_NAME_TOO_LONG: string; + PROPERTY_NAME_INVALID: string; + BATCH_OPERATION_NOT_SUPPORTED: string; + JSON_FORMAT_NOT_SUPPORTED: string; + METHOD_NOT_ALLOWED: string; + NOT_IMPLEMENTED: string; + }; + export var ConnectionStringKeys: { + USE_DEVELOPMENT_STORAGE_NAME: string; + DEVELOPMENT_STORAGE_PROXY_URI_NAME: string; + DEFAULT_ENDPOINTS_PROTOCOL_NAME: string; + ACCOUNT_NAME_NAME: string; + ACCOUNT_KEY_NAME: string; + BLOB_ENDPOINT_NAME: string; + FILE_ENDPOINT_NAME: string; + QUEUE_ENDPOINT_NAME: string; + TABLE_ENDPOINT_NAME: string; + SHARED_ACCESS_SIGNATURE_NAME: string; + BLOB_BASE_DNS_NAME: string; + FILE_BASE_DNS_NAME: string; + QUEUE_BASE_DNS_NAME: string; + TABLE_BASE_DNS_NAME: string; + }; + + } + + // ########################### + // ./common/util/storageutilities + // ########################### 
+ module storageutilities { + /** + * Defines constants, enums, and utility functions for use with storage. + * @namespace StorageUtilities + */ + /** + * Specifies the location mode used to decide which location the request should be sent to. + * + * @const + * @enum {number} + * @alias StorageUtilities.LocationMode + */ + export enum LocationMode { + /** + * The primary location only + * @property LocationMode.PRIMARY_ONLY + */ + PRIMARY_ONLY = 0, + /** + * The primary location first, then the secondary + * @property LocationMode.PRIMARY_THEN_SECONDARY + */ + PRIMARY_THEN_SECONDARY = 1, + /** + * The secondary location only + * @property LocationMode.SECONDARY_ONLY + */ + SECONDARY_ONLY = 2, + /** + * The secondary location first, then the primary + * @property LocationMode.SECONDARY_THEN_PRIMARY + */ + SECONDARY_THEN_PRIMARY = 3, + } + } + + // ########################### + // ./common/util/accesscondition + // ########################### + module accesscondition { + /** + * Constructs an empty access condition. 
+ * + * @return {AccessConditions} An empty AccessConditions object + */ + export function generateEmptyCondition() : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource does not exist on the service + * + * Setting this access condition modifies the request to include the HTTP If-None-Match conditional header + + * @return {AccessConditions} An AccessConditions object that represents a condition that checks for nonexistence + */ + export function generateIfNotExistsCondition(): AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource exists on the service + * + * Setting this access condition modifies the request to include the HTTP If-Match conditional header + + * @return {AccessConditions} An AccessConditions object that represents a condition that checks for existence + */ + export function generateIfExistsCondition(): AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource's ETag value + * does not match the specified ETag value + * + * Setting this access condition modifies the request to include the HTTP If-None-Match conditional header + * + * @param {string} etag The ETag value to check against the resource's ETag + * @return {AccessConditions} An AccessConditions object that represents the If-None-Match condition + */ + export function generateIfNoneMatchCondition(etag: string) : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource's ETag value + * matches the specified ETag value + * + * Setting this access condition modifies the request to include the HTTP If-Match conditional header + * + * @param {string} etag The ETag value to check against the resource's ETag + * @return {AccessConditions} An AccessConditions object that represents the If-Match condition + */ + export function 
generateIfMatchCondition(etag: string) : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource has been + * modified since the specified time + * + * Setting this access condition modifies the request to include the HTTP If-Modified-Since conditional header + * + * @param {Date|string} time A date object specifying the time since which the resource must have been modified + * @return {AccessConditions} An AccessConditions object that represents the If-Modified-Since condition + */ + export function generateIfModifiedSinceCondition(time: Date|string) : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource has not been + * modified since the specified time + * + * Setting this access condition modifies the request to include the HTTP If-Unmodified-Since conditional header + * + * @param {Date|string} time A date object specifying the time since which the resource must have not been modified + * @return {AccessConditions} An AccessConditions object that represents the If-Unmodified-Since condition + */ + export function generateIfNotModifiedSinceCondition(time: Date|string) : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource's sequence number + * is equal to the specified value + * + * Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-eq conditional header + * + * @param {Number|string} sequenceNumber A date object specifying the time since which the resource must have not been modified + * @return {AccessConditions} An AccessConditions object that represents the If-Unmodified-Since condition + */ + export function generateSequenceNumberEqualCondition(sequenceNumber: Number|string) : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource's sequence 
number + * is less than the specified value + * + * Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-lt conditional header + * + * @param {Number|string} sequenceNumber A date object specifying the time since which the resource must have not been modified + * @return {AccessConditions} An AccessConditions object that represents the If-Unmodified-Since condition + */ + export function generateSequenceNumberLessThanCondition(sequenceNumber: Number|string) : AccessConditions; + + /** + * Constructs an access condition such that an operation will be performed only if the resource's sequence number + * is less than or equal to the specified value + * + * Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-le conditional header + * + * @param {Number|string} sequenceNumber A date object specifying the time since which the resource must have not been modified + * @return {AccessConditions} An AccessConditions object that represents the If-Unmodified-Since condition + */ + export function generateSequenceNumberLessThanOrEqualCondition(sequenceNumber: Number|string) : AccessConditions; + } + + // ########################### + // ./common/util/sr + // ########################### + module sr { + export var SR: { + ANONYMOUS_ACCESS_BLOBSERVICE_ONLY: string; + ARGUMENT_NULL_OR_EMPTY: string; + ARGUMENT_NULL_OR_UNDEFINED: string; + ARGUMENT_OUT_OF_RANGE_ERROR: string; + BATCH_ONE_PARTITION_KEY: string; + BATCH_ONE_RETRIEVE: string; + BATCH_TOO_LARGE: string; + HASH_MISMATCH: string; + BLOB_INVALID_SEQUENCE_NUMBER: string; + BLOB_TYPE_MISMATCH: string; + CONTENT_LENGTH_MISMATCH: string; + INVALID_DELETE_SNAPSHOT_OPTION: string; + CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY: string; + CONTENT_TYPE_MISSING: string; + EMPTY_BATCH: string; + EXCEEDED_SIZE_LIMITATION: string; + INCORRECT_ENTITY_KEYS: string; + INVALID_BLOB_LENGTH: string; + INVALID_FILE_LENGTH: string; + INVALID_CONNECTION_STRING: 
string; + INVALID_CONNECTION_STRING_BAD_KEY: string; + INVALID_CONNECTION_STRING_DUPLICATE_KEY: string; + INVALID_CONNECTION_STRING_EMPTY_KEY: string; + INVALID_EDM_TYPE: string; + INVALID_HEADERS: string; + INVALID_MESSAGE_ID: string; + INVALID_PAGE_BLOB_LENGTH: string; + INVALID_PAGE_END_OFFSET: string; + INVALID_PAGE_START_OFFSET: string; + INVALID_FILE_RANGE_FOR_UPDATE: string; + INVALID_PAGE_RANGE_FOR_UPDATE: string; + INVALID_POP_RECEIPT: string; + INVALID_PROPERTY_RESOLVER: string; + INVALID_RANGE_FOR_MD5: string; + INVALID_SAS_VERSION: string; + INVALID_SIGNED_IDENTIFIERS: string; + INVALID_STREAM_LENGTH: string; + INVALID_STRING_ERROR: string; + INVALID_TEXT_LENGTH: string; + QUERY_OPERATOR_REQUIRES_WHERE: string; + MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION: string; + MD5_NOT_PRESENT_ERROR: string; + METADATA_KEY_INVALID: string; + METADATA_VALUE_INVALID: string; + NO_CREDENTIALS_PROVIDED: string; + INVALID_TABLE_OPERATION: string; + PRIMARY_ONLY_COMMAND: string; + SECONDARY_ONLY_COMMAND: string; + STORAGE_HOST_LOCATION_REQUIRED: string; + STORAGE_HOST_MISSING_LOCATION: string; + TYPE_NOT_SUPPORTED: string; + NO_LIST_FUNC_PROVIDED: string; + }; + } + + // ########################### + // ./common/util/validate + // ########################### + module validate { + /** + * Checks if the given value is a valid enumeration or not. + * + * @param {Object} value The value to validate. + * @param {Object} list The enumeration values. + * @return {boolean} + */ + export function isValidEnumValue(value: string, list: string[], callback: Function): boolean; + /** + * Creates a anonymous function that check if the given uri is valid or not. + * + * @param {string} uri The uri to validate. + * @return {boolean} + */ + export function isValidUri(uri: string): boolean; + /** + * Checks if the given host is valid or not. + * + * @param {string|object} host The host to validate. 
+ * @return {boolean} + */ + export function isValidHost(host: string): boolean; + export function isValidHost(host: StorageHost): boolean; + /** + * Checks if the given value is a valid UUID or not. + * + * @param {string|object} uuid The uuid to validate. + * @return {boolean} + */ + export function isValidUuid(uuid: string, callback?: Function): boolean; + /** + * Creates a anonymous function that check if a given key is base 64 encoded. + * + * @param {string} key The key to validate. + * @return {boolean} + */ + export function isBase64Encoded(key: string): boolean; + /** + * Validates a function. + * + * @param {Object} function The function to validate. + * @return {boolean} + */ + export function isValidFunction(functionObject: any, functionName: string): boolean; + /** + * Validates a container name. + * + * @param {string} containerName The container name. + */ + export function containerNameIsValid(containerName: string, callback?: Function): boolean; + /** + * Validates a blob name. + * + * @param {string} containerName The container name. + * @param {string} blobname The blob name. + */ + export function blobNameIsValid(containerName: string, blobName: string, callback?: Function): boolean; + /** + * Validates a share name. + * + * @param {string} shareName The share name. + */ + export function shareNameIsValid(shareName: string, callback?: Function): boolean; + /** + * Validates a queue name. + * + * @param {string} queueName The queue name. + */ + export function queueNameIsValid(queueName: string, callback?: Function): boolean; + /** + * Validates a table name. + * + * @param {string} table The table name. + */ + export function tableNameIsValid(table: string, callback?: Function): boolean; + /** + * Validates page ranges. + * + * @param {int} rangeStart The range starting position. + * @param {int} rangeEnd The range ending position. + * @param {int} writeBlockSizeInBytes The block size. 
+ */ + export function pageRangesAreValid(rangeStart: number, rangeEnd: number, writeBlockSizeInBytes: number, callback?: Function): boolean; + /** + * Validates a blob type. + * + * @param {string} type The type name. + */ + export function blobTypeIsValid(type: string, callback?: Function): boolean; + export class ArgumentValidator { + func: string; + tableNameIsValid: (tableName: string, callback?: Function) => boolean; + containerNameIsValid: (containerName: string, callback?: Function) => boolean; + shareNameIsValid: (shareName: string, callback?: Function) => boolean; + blobNameIsValid: (containerName: string, blobName: string, callback?: Function) => boolean; + pageRangesAreValid: (rangeStart: number, rangeEnd: number, writeBlockSizeInBytes: number, callback?: Function) => boolean; + queueNameIsValid: (queueName: string, callback?: Function) => boolean; + blobTypeIsValid: (shareName: string, callback?: Function) => boolean; + isValidEnumValue: (value: string, list: string[], callback?: Function) => boolean; + constructor(functionName: string); + string(val: any, name: string): void; + stringAllowEmpty(val: any, name: string): void; + object(val: any, name: string): void; + exists(val: any, name: string): void; + function(val: any, name: string): void; + value(val: any, name: string): void; + nonEmptyArray(val: T[], name: string): void; + callback(val: any): void; + test(predicate: () => boolean, message: string): void; + } + export function validateArgs(functionName: string, validationRules: (validator: ArgumentValidator) => void): void; + } + + // ########################### + // ./common/util/date + // ########################### + module date { + /** + * Date/time related helper functions + * @module date + * + */ + /** + * Generates a Date object which is in the given days from now. + * + * @param {int} days The days timespan. 
+ * @return {Date} + */ + export function daysFromNow(days: number): Date; + /** + * Generates a Date object which is in the given hours from now. + * + * @param {int} hours The hours timespan. + * @return {Date} + */ + export function hoursFromNow(hours: number): Date; + /** + * Generates a Date object which is in the given minutes from now. + * + * @param {int} minutes The minutes timespan. + * @return {Date} + */ + export function minutesFromNow(minutes: number): Date; + /** + * Generates a Date object which is in the given seconds from now. + * + * @param {int} seconds The seconds timespan. + * @return {Date} + */ + export function secondsFromNow(seconds: number): Date; + + } + } + + module http { + // ########################### + // ./common/http/webresource + // ########################### + module webresource { + export class WebResource { + rawResponse: boolean; + queryString: any; + path: string; + method: string; + properties: Map; + body: any; + headersOnly: boolean; + uri: string; + headers: Map; + /** + * Creates a new WebResource object. + */ + constructor(); + /** + * Creates a new put request web resource. + * + * @function WebResource#put + * @static + * @param {string} path The path for the put operation. + * @return {WebResource} A new webresource with a put operation for the given path. + */ + static put(path?: string): WebResource; + /** + * Creates a new get request web resource. + * + * @function WebResource#get + * @static + * @param {string} path The path for the get operation. + * @return {WebResource} A new webresource with a get operation for the given path. + */ + static get(path?: string): WebResource; + /** + * Creates a new head request web resource. + * + * @function WebResource#head + * @static + * @param {string} path The path for the head operation. + * @return {WebResource} A new webresource with a head operation for the given path. 
+ */ + static head(path: string): WebResource; + /** + * Creates a new delete request web resource. + * + * @function WebResource#del + * @static + * @param {string} path The path for the delete operation. + * @return {WebResource} A new webresource with a delete operation for the given path. + */ + static del(path: string): WebResource; + /** + * Creates a new post request web resource. + * + * @function WebResource#post + * @static + * @param {string} path The path for the post operation. + * @return {WebResource} A new webresource with a post operation for the given path. + */ + static post(path: string): WebResource; + /** + * Creates a new merge request web resource. + * + * @function WebResource#merge + * @static + * @param {string} path The path for the merge operation. + * @return {WebResource} A new webresource with a merge operation for the given path. + */ + static merge(path: string): WebResource; + /** + * Specifies a custom property in the web resource. + * + * @function WebResource#withProperty + * @param {string} name The property name. + * @param {string} value The property value. + * @return {WebResource} The webresource. + */ + withProperty(name: string, value: string): WebResource; + /** + * Specifies if the response should be parsed or not. + * + * @function WebResource#withRawResponse + * @param {bool} [rawResponse=true] true if the response should not be parsed; false otherwise. + * @return {WebResource} The webresource. + */ + withRawResponse(rawResponse?: boolean): WebResource; + /** + * Specifies if the request only has headers. + * + * @function WebResource#withHeadersOnly + * @param {bool} [headersOnly=true] true if the request only has headers; false otherwise. + * @return {WebResource} The webresource. + */ + withHeadersOnly(headersOnly?: boolean): WebResource; + /** + * Adds an optional query string parameter. + * + * @function WebResource#withQueryOption + * @param {Object} name The name of the query string parameter. 
+ * @param {Object} value The value of the query string parameter. + * @param {Object} defaultValue The default value for the query string parameter to be used if no value is passed. + * @return {Object} The web resource. + */ + withQueryOption(name: any, value: T, defaultValue?: T): WebResource; + /** + * Adds optional query string parameters. + * + * Additional arguments will be the needles to search in the haystack. + * + * @function WebResource#withQueryOptions + * @param {Object} object The haystack of query string parameters. + * @return {Object} The web resource. + */ + withQueryOptions(object: any, ...queryArgs: any[]): WebResource; + /** + * Adds an optional header parameter. + * + * @function WebResource#withHeader + * @param {Object} name The name of the header parameter. + * @param {Object} value The value of the header parameter. + * @return {Object} The web resource. + */ + withHeader(name: string, value: T): WebResource; + /** + * Adds an optional body. + * + * @function WebResource#withBody + * @param {Object} body The request body. + * @return {Object} The web resource. + */ + withBody(body: any): WebResource; + /** + * Adds optional query string parameters. + * + * Additional arguments will be the needles to search in the haystack. + * + * @function WebResource#withHeaders + * @param {Object} object The haystack of headers. + * @return {Object} The web resource. + withHeaders(object: { + [x: string]: any; + }, ...args: string[]): WebResource; + addOptionalMetadataHeaders(metadata: any): WebResource; + /** + * Determines if a status code corresponds to a valid response according to the WebResource's expected status codes. + * + * @function WebResource#validResponse + * @static + * @param {int} statusCode The response status code. + * @return true if the response is valid; false otherwise. 
+ */ + static validResponse(statusCode: number): boolean; + /** + * Hook up the given input stream to a destination output stream if the WebResource method + * requires a request body and a body is not already set. + * + * @function WebResource#pipeInput + * @param {Stream} inputStream the stream to pipe from + * @param {Stream} outputStream the stream to pipe to + * + * @return destStream + */ + pipeInput(inputStream: NodeJS.ReadableStream, destStream: NodeJS.WritableStream): NodeJS.WritableStream; + } + } + } + + module diagnostics { + // ########################### + // ./common/loggerdiagnostics/logger + // ########################### + module logger { + export class Logger { + level: string; + loggerFunction: (level: string, message: string) => void; + constructor(level: any, loggerFunction?: (level: string, message: string) => void); + static LogLevels: { + EMERGENCY: string; + ALERT: string; + CRITICAL: string; + ERROR: string; + WARNING: string; + NOTICE: string; + INFO: string; + DEBUG: string; + }; + log(level: any, msg: any): void; + emergency(msg: any): void; + critical(msg: any): void; + alert(msg: any): void; + error(msg: any): void; + warn(msg: any): void; + notice(msg: any): void; + info(msg: any): void; + debug(msg: any): void; + defaultLoggerFunction(logLevel: string, msg: string): void; + } + } + } + + module streams { + // ########################### + // ./common/util/speedsummary + // ########################### + module speedsummary { + export interface SpeedSummary extends events.EventEmitter { + name: string; + totalSize: number; + completeSize: number; + /** + * Get running seconds + */ + getElapsedSeconds(humanReadable: boolean): number; + /** + * Get complete percentage + * @param {int} len The number of digits after the decimal point. 
+ */ + getCompletePercent(len: number): number; + /** + * Get average upload/download speed + */ + getAverageSpeed(humanReadable: boolean): string; + /** + * Get instant speed + */ + getSpeed(humanReadable: boolean): string | number; + /** + * Increment the complete data size + */ + increment(len: number): number; + /** + * Get auto increment function + */ + getAutoIncrementFunction(size: number): (error: any, retValue: number) => void; + /** + * Get total size + */ + getTotalSize(humanReadable: boolean): string | number; + /** + * Get completed data size + */ + getCompleteSize(humanReadable: boolean): string | number; + } + } + } + + module models { + export interface ServiceStats { + GeoReplication?: { + Status?: string; + LastSyncTime?: Date; + }; + } + + export interface AccountProperties { + SkuName: string; + AccountKind: string; + } + + module ServicePropertiesResult { + export interface RetentionPolicy { + Enabled: boolean; + Days: number; + } + export interface MetricsProperties { + Version: string; + Enabled: boolean; + IncludeAPIs: boolean; + RetentionPolicy: RetentionPolicy; + } + export interface CorsRule { + AllowedMethods: string[]; + AllowedOrigins: string[]; + AllowedHeaders: string[]; + ExposedHeaders: string[]; + MaxAgeInSeconds: number; + } + export interface LoggingProperties { + Version: string; + Delete: boolean; + Read: boolean; + Write: boolean; + RetentionPolicy: RetentionPolicy; + } + export interface DeleteRetentionPolicyProperties { + Enabled: boolean; + Days?: number; + } + export interface ServiceProperties { + DefaultServiceVersion?: string; + Logging?: LoggingProperties; + DeleteRetentionPolicy?: DeleteRetentionPolicyProperties; + HourMetrics?: MetricsProperties; + MinuteMetrics?: MetricsProperties; + Cors?: { + CorsRule: CorsRule[]; + }; + } + export interface StaticWebsiteProperties { + Enabled: boolean; + IndexDocument?: string; + ErrorDocument404Path?: string; + } + export interface BlobServiceProperties extends 
ServiceProperties { + StaticWebsite?: StaticWebsiteProperties; + } + export function serialize(servicePropertiesJs: ServiceProperties): string; + export function parse(servicePropertiesXml: any): ServiceProperties; + } + + module tokenCredential { + export class TokenCredential { + token: string; + constructor(token: string); + get(): string; + set(token: string): void; + } + } + } + + module services { + // ########################### + // ./common/services/storageserviceclient + // ########################### + module storageserviceclient { + export interface Proxy { + host: string; + port: number; + proxyAuth: string; + headers: Map; + key: string; + ca: string; + cert: string; + } + + export class StorageServiceClient extends events.EventEmitter { + /** + * The default location mode for requests made via the service. + * @member {StorageUtilities.LocationMode} StorageServiceClient#defaultLocationMode + */ + defaultLocationMode: common.util.storageutilities.LocationMode; + /** + * The default maximum execution time across all potential retries, for requests made via the service. + * @member {int} StorageServiceClient#defaultMaximumExecutionTimeInMs + */ + defaultMaximumExecutionTimeInMs: number; + /** + * The default timeout interval, in milliseconds, to use for request made via the service. + * @member {int} StorageServiceClient#defaultTimeoutIntervalInMs + */ + defaultTimeoutIntervalInMs: number; + /** + * The default timeout of client requests, in milliseconds, to use for the request. + * @member {int} StorageServiceClient#defaultClientRequestTimeoutInMs + */ + defaultClientRequestTimeoutInMs: number; + /** + * Determines whether the Nagle algorithm is used for requests made via the Queue service; true to use the + * Nagle algorithm; otherwise, false. The default value is false. 
+ * @member {bool} StorageServiceClient#useNagleAlgorithm + */ + useNagleAlgorithm: boolean; + /** + * Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use + * http(s).Agent({keepAlive:true}). + * @member {bool} StorageServiceClient#enableGlobalHttpAgent + */ + enableGlobalHttpAgent: boolean; + /** The proxy object specified by caller. + * @member {Proxy} StorageServiceClient#proxy + */ + proxy: Proxy; + /** The logging settings object. + * @member {diagnostics.logger.Logger} StorageServiceClient#logger + */ + logger: diagnostics.logger.Logger; + + /** + * Creates a new StorageServiceClient object. + * + * @constructor StorageServiceClient + * @param {string} storageAccount The storage account. + * @param {string} storageAccessKey The storage access key. + * @param {Object} host The host for the service. + * @param {bool} usePathStyleUri Boolean value indicating wether to use path style uris. + * @param {string} sasToken The Shared Access Signature token. + */ + constructor(storageAccount?: string, storageAccessKey?: string, host?: StorageHost, usePathStyleUri?: boolean, sasToken?: string); + /** + * Associate a filtering operation with this StorageServiceClient. Filtering operations + * can include logging, automatically retrying, etc. Filter operations are objects + * that implement a method with the signature: + * + * "function handle (requestOptions, next)". + * + * After doing its preprocessing on the request options, the method needs to call + * "next" passing a callback with the following signature: + * signature: + * + * "function (returnObject, finalCallback, next)" + * + * In this callback, and after processing the returnObject (the response from the + * request to the server), the callback needs to either invoke next if it exists to + * continue processing other filters or simply invoke finalCallback otherwise to end + * up the service invocation. 
+ * + * @function StorageServiceClient#withFilter + * @param {Object} filter The new filter object. + * @return {StorageServiceClient} A new service client with the filter applied. + */ + withFilter(newFilter: common.filters.IFilter): StorageServiceClient; + /** + * Sets proxy object specified by caller. + * + * @function StorageServiceClient#setProxy + * @param {(object|string)} proxy proxy to use for tunneling + * { + * host: hostname + * port: port number + * proxyAuth: 'user:password' for basic auth + * headers: {...} headers for proxy server + * key: key for proxy server + * ca: ca for proxy server + * cert: cert for proxy server + * } + * if null or undefined, clears proxy + */ + setProxy(proxy: Proxy): void; + } + } + } + + export interface AccessPolicy { + /** The permission type. */ + Permissions: string; + /** The time at which the Shared Access Signature becomes valid. */ + Start?: Date | string; + /** The time at which the Shared Access Signature becomes expired. */ + Expiry?: Date | string; + /** An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. */ + IPAddressOrRange?: string; + /** The protocol permitted for a request made with the SAS. */ + Protocols?: string; + /** The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. */ + Services?: string; + /** The resource type for a shared access signature associated with this shared access policy. */ + ResourceTypes?: string; + } + + export interface SharedAccessPolicy { + /** The signed identifier. 
*/ + Id?: string; + /** The Access Policy information */ + AccessPolicy: AccessPolicy; + } + + export interface ContentSettingsHeaders { + cacheControl?: string; + contentType?: string; + contentEncoding?: string; + contentLanguage?: string; + contentDisposition?: string; + } + + /** + * Common request options for azure storage services + */ + export interface RequestOptions { + /** + * {LocationMode} Specifies the location mode used to decide which location the request should be sent to. + */ + locationMode?: StorageUtilities.LocationMode; + /** + * {int} The server timeout interval, in milliseconds, to use for the request. + */ + timeoutIntervalInMs?: number; + /** + * {int} The timeout of client requests, in milliseconds, to use for the request. + */ + clientRequestTimeoutInMs?: number; + + /** + * {int} The maximum execution time, in milliseconds, across all potential retries, to use when making this request. + */ + maximumExecutionTimeInMs?: number; + /** + * {bool} Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. + */ + useNagleAlgorithm?: boolean; + /** + * {string} A string that represents the client request ID with a 1KB character limit. + */ + clientRequestId?: string; + } + + export interface ContinuationToken { + nextMarker: string; + targetLocation?: Constants.StorageLocation; + } + + export interface Range { + start?: number; + end?: number; + } + + export interface RangeDiff { + start?: number; + end?: number; + isCleared?: boolean + } + } + + /** + * Creates a connection string that can be used to create a service which runs on the storage emulator. The emulator must be downloaded separately. + * + * @param {string} [proxyUri] The proxyUri. By default, http://127.0.0.1 + * @return {string} A connection string representing the development storage credentials. 
+ * @example + * var azure = require('azure-storage'); + * var devStoreCreds = azure.generateDevelopmentStorageCredentials(); + * var blobService = azure.createBlobService(devStoreCreds); + */ + export function generateDevelopmentStorageCredentials(proxyUri?: string): string; + + /** + * Table client exports + * @ignore + */ + export import TableService = services.table.TableService; + export import TableQuery = services.table.TableQuery; + export import TableBatch = services.table.TableBatch; + export import TableUtilities = services.table.TableUtilities; + + /** + * Creates a new {@link TableService} object. + * If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY + * environment variables will be used. + * + * @param {string} [storageAccountOrConnectionString] The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @return {TableService} A new TableService object. + * + */ + export function createTableService(): TableService; + export function createTableService(connectionString: string): TableService; + export function createTableService(storageAccountOrConnectionString: string, storageAccessKey: string, host?: StorageHost): TableService; + + /** + * Creates a new {@link TableService} object using the host Uri and the SAS credentials provided. + * + * @param {string|object} host The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} sasToken The Shared Access Signature token. + * @return {TableService} A new TableService object with the SAS credentials. 
+ */ + export function createTableServiceWithSas(hostUri: string | StorageHost, sasToken: string): TableService; + + /** + * Blob client exports + * @ignore + */ + export import BlobService = services.blob.blobservice.BlobService; + export import BlobUtilities = services.blob.blobutilities.BlobUtilities; + + /** + * Creates a new {@link BlobService} object. + * If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY + * environment variables will be used. + * + * @param {string} storageAccountOrConnectionString The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @return {BlobService} A new BlobService object. + */ + export function createBlobService(storageAccount: string, storageAccessKey: string, host?: string|StorageHost): BlobService; + export function createBlobService(connectionString: string): BlobService; + export function createBlobService(): BlobService; + /** + * Creates a new {@link BlobService} object using the host Uri and the SAS credentials provided. + * + * @param {string|object} host The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} sasToken The Shared Access Signature token. + * @return {BlobService} A new BlobService object with the SAS credentials. + */ + export function createBlobServiceWithSas(host: string|StorageHost, sasToken: string): BlobService; + + /** + * Creates a new {@link BlobService} object using the host Uri and the {@link TokenCredential} provided, which supports OAuth. + * + * @param {string|object} host The host address. 
To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {TokenCredential} token The TokenCredential object. + * @return {BlobService} A new BlobService object with the {@link TokenCredential} credentials. + * + * @example + * var azure = require('azure-storage'); + * var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); + * var blobService = azure.createBlobServiceWithTokenCredential('https://account.blob.core.windows.net', tokenCredential); + * tokenCredential.set('updatedOAuthAccessToken'); + */ + export function createBlobServiceWithTokenCredential(host: string|StorageHost, token: TokenCredential): BlobService; + + /** + * Creates a new {@link BlobService} object using the host uri and anonymous access. + * + * @param {string|object} host The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @return {BlobService} A new BlobService object with the anonymous credentials. + */ + export function createBlobServiceAnonymous(host?: string|StorageHost): BlobService; + + ///** + // * File client exports + // * @ignore + // */ + export import FileService = services.file.FileService; + export import FileUtilities = services.file.FileUtilities; + + ///** + //* Creates a new {@link FileService} object. + //* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY + //* environment variables will be used. + //* + //* @param {string} storageAccountOrConnectionString The storage account or the connection string. + //* @param {string} [storageAccessKey] The storage access key. + //* @param {string|object} [host] The host address. To define primary only, pass a string. 
+ //* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + //* @return {FileService} A new FileService object. + //*/ + export function createFileService(storageAccount: string, storageAccessKey: string, host?: string | StorageHost): FileService; + export function createFileService(connectionString: string): FileService; + export function createFileService(): FileService; + + /** + * Creates a new {@link FileService} object using the host Uri and the SAS credentials provided. + * + * @param {string|object} host The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} sasToken The Shared Access Signature token. + * @return {FileService} A new FileService object with the SAS credentials. + */ + export function createFileServiceWithSas(hostUri: string | StorageHost, sasToken: string): FileService; + + /** + * Queue client exports + * @ignore + */ + export import QueueService = services.queue.QueueService; + export import QueueMessageEncoder = services.queue.QueueMessageEncoder; + export import QueueUtilities = services.queue.QueueUtilities; + + /** + * Creates a new {@link QueueService} object. + * If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY + * environment variables will be used. + * + * @param {string} [storageAccountOrConnectionString] The storage account or the connection string. + * @param {string} [storageAccessKey] The storage access key. + * @param {string|object} [host] The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @return {QueueService} A new QueueService object. 
+ */ + export function createQueueService(storageAccount: string, storageAccessKey: string, host?: string | StorageHost): QueueService; + export function createQueueService(connectionString: string): QueueService; + export function createQueueService(): QueueService; + + /** + * Creates a new {@link QueueService} object using the host Uri and the SAS credentials provided. + * + * @param {string|object} host The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {string} sasToken The Shared Access Signature token. + * @return {QueueService} A new QueueService object with the SAS credentials. + */ + export function createQueueServiceWithSas(host: string | StorageHost, sasToken: string): QueueService; + + /** + * Creates a new {@link QueueService} object using the host Uri and the {@link TokenCredential} provided, which supports OAuth. + * + * @param {string|object} host The host address. To define primary only, pass a string. + * Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. + * @param {TokenCredential} token The TokenCredential object. + * @return {QueueService} A new QueueService object with the {@link TokenCredential} object. 
+ * + * @example + * var azure = require('azure-storage'); + * var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); + * var queueService = azure.createQueueServiceWithTokenCredential('https://account.queue.core.windows.net', tokenCredential); + * tokenCredential.set('updatedOAuthAccessToken'); + */ + export function createQueueServiceWithTokenCredential(host: string | StorageHost, token: TokenCredential): QueueService; + + export function generateAccountSharedAccessSignature(storageAccountOrConnectionString: string, storageAccessKey: string, sharedAccessAccountPolicy: common.SharedAccessPolicy): string; + + interface StorageError extends Error { + statusCode?: number; + requestId?: string; + code?: string; + } + + interface ServiceResponse { + isSuccessful: boolean; + statusCode: number; + body?: string | Buffer; + headers?: Map; + md5: string; + error?: StorageError | Error; + requestServerEncrypted?: boolean; + } + + interface ServiceResult { + error: StorageError | Error; + response: ServiceResponse; + contentMD5: string; + length?: number; + operationEndTime: Date; + targetLocation: Constants.StorageLocation; + } + + /** + * A callback that returns a response object. + * @callback errorOrResponse + * @param {Object} error If an error occurs, will contain information about the error. + * @param {Object} response Contains information about the response returned for the operation. + * For example, HTTP status codes and headers. + */ + interface ErrorOrResponse { + (error: Error, response: ServiceResponse): void + } + /** + * A callback that returns result and response objects. + * @callback errorOrResult + * @param {Object} error If an error occurs, will contain information about the error. + * @param {Object} result The result of the operation. + * @param {Object} response Contains information about the response returned for the operation. + * For example, HTTP status codes and headers. 
+ */ + interface ErrorOrResult { + (error: Error, result: TResult, response: ServiceResponse): void + } + /** + * Specifying conditional headers for blob service operations. See http://msdn.microsoft.com/en-us/library/dd179371.aspx for more information. + * @typedef {Object} AccessConditions + * @property {string} EtagMatch If the ETag for the blob matches the specified ETag. + * Specify the wildcard character (*) to perform the operation only if the resource does exist, and fail the operation if it does not exist. + * @property {string} EtagNonMatch If the ETag for the blob does not match the specified ETag. + * Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. + * @property {Date|string} DateModifedSince If the blob has been modified since the specified date. + * @property {Date|string} DateUnModifiedSince If the blob has not been modified since the specified date. + * @property {Number|string} SequenceNumberLessThanOrEqual If the blob’s sequence number is less than or equal to the specified value. + * For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. + * @property {Number|string} SequenceNumberLessThan If the blob’s sequence number is less than the specified value. + * For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. + * @property {Number|string} SequenceNumberEqual If the blob’s sequence number is equal to the specified value. + * For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. + * @property {Number|string} MaxBlobSize If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the specified value. + * For Append Block operation only. See https://msdn.microsoft.com/en-us/library/mt427365.aspx for more information. 
+ * @property {Number|string} MaxAppendPosition If the append position is equal to the specified value. + * For Append Block operation only. See https://msdn.microsoft.com/en-us/library/mt427365.aspx for more information. + */ + export interface AccessConditions { + EtagMatch?: string; + EtagNonMatch?: string; + DateModifedSince?: Date | string; + DateUnModifiedSince?: Date | string; + SequenceNumberLessThanOrEqual?: Number | string; + SequenceNumberLessThan?: Number | string; + SequenceNumberEqual?: Number | string; + MaxBlobSize?: Number | string; + MaxAppendPosition?: Number | string; + } + + export import Constants = common.util.constants; + export import StorageUtilities = common.util.storageutilities; + export import AccessCondition = common.util.accesscondition; + export import SR = common.util.sr.SR; + export import StorageServiceClient = common.services.storageserviceclient.StorageServiceClient; + export import Logger = common.diagnostics.logger.Logger; + export import WebResource = common.http.webresource.WebResource; + export import Validate = common.util.validate; + export import date = common.util.date; + export import TokenCredential = common.models.tokenCredential.TokenCredential; + export import LinearRetryPolicyFilter = common.filters.linearretrypolicyfilter.LinearRetryPolicyFilter; + export import ExponentialRetryPolicyFilter = common.filters.exponentialretrypolicyfilter.ExponentialRetryPolicyFilter; + export import RetryPolicyFilter = common.filters.retrypolicyfilter.RetryPolicyFilter; +} + +export = azurestorage; diff --git a/src/node_modules/azure-storage/typings/globals/node/index.d.ts b/src/node_modules/azure-storage/typings/globals/node/index.d.ts new file mode 100644 index 0000000..26afe21 --- /dev/null +++ b/src/node_modules/azure-storage/typings/globals/node/index.d.ts @@ -0,0 +1,1440 @@ +// Generated by typings +// Source: https://raw.githubusercontent.com/types/env-node/959285e4da295481cf634f7d11f6ccccc863e430/0.10/node.d.ts +declare 
var process: NodeJS.Process; +declare var global: any; + +declare var __filename: string; +declare var __dirname: string; + +declare function setTimeout(callback: (...args: any[]) => void, ms: number, ...args: any[]): NodeJS.Timer; +declare function clearTimeout(timeoutId: NodeJS.Timer): void; +declare function setInterval(callback: (...args: any[]) => void, ms: number, ...args: any[]): NodeJS.Timer; +declare function clearInterval(intervalId: NodeJS.Timer): void; +declare function setImmediate(callback: (...args: any[]) => void, ...args: any[]): any; +declare function clearImmediate(immediateId: any): void; + +interface NodeRequireFunction { + (id: string): any; +} + +interface NodeRequire extends NodeRequireFunction { + resolve (id: string): string; + cache: { [filename: string]: NodeModule }; + extensions: { [ext: string]: (m: NodeModule, filename: string) => any }; + main: any; +} + +declare var require: NodeRequire; + +interface NodeModule { + exports: any; + require: NodeRequireFunction; + id: string; + filename: string; + parent: NodeModule; + loaded: boolean; + children: NodeModule[]; +} + +declare var module: NodeModule; + +// Same as module.exports +declare var exports: any; +declare var SlowBuffer: { + new (str: string, encoding?: string): Buffer; + new (size: number): Buffer; + new (size: Uint8Array): Buffer; + new (array: any[]): Buffer; + prototype: Buffer; + isBuffer(obj: any): boolean; + byteLength(string: string, encoding?: string): number; + concat(list: Buffer[], totalLength?: number): Buffer; +}; + +// Console class (compatible with TypeScript `lib.d.ts`). 
+declare interface Console { + log (msg: any, ...params: any[]): void; + info (msg: any, ...params: any[]): void; + warn (msg: any, ...params: any[]): void; + error (msg: any, ...params: any[]): void; + dir (value: any, ...params: any[]): void; + time (timerName?: string): void; + timeEnd (timerName?: string): void; + trace (msg: any, ...params: any[]): void; + assert (test?: boolean, msg?: string, ...params: any[]): void; + + Console: new (stdout: NodeJS.WritableStream) => Console; +} + +declare var console: Console; + +// Buffer class +interface Buffer extends NodeBuffer {} +declare var Buffer: { + new (str: string, encoding?: string): Buffer; + new (size: number): Buffer; + new (size: Uint8Array): Buffer; + new (array: any[]): Buffer; + prototype: Buffer; + isBuffer(obj: any): boolean; + byteLength(string: string, encoding?: string): number; + concat(list: Buffer[], totalLength?: number): Buffer; +}; + +/************************************************ +* * +* GLOBAL INTERFACES * +* * +************************************************/ +declare namespace NodeJS { + export interface ErrnoException extends Error { + errno?: number; + code?: string; + path?: string; + syscall?: string; + stack?: string; + } + + export interface EventEmitter { + addListener(event: string, listener: Function): this; + on(event: string, listener: Function): this; + once(event: string, listener: Function): this; + removeListener(event: string, listener: Function): this; + removeAllListeners(event?: string): this; + setMaxListeners(n: number): void; + listeners(event: string): Function[]; + emit(event: string, ...args: any[]): boolean; + } + + export interface ReadableStream extends EventEmitter { + readable: boolean; + read(size?: number): any; + setEncoding(encoding: string): void; + pause(): void; + resume(): void; + pipe(destination: T, options?: { end?: boolean; }): T; + unpipe(destination?: T): void; + unshift(chunk: string): void; + unshift(chunk: Buffer): void; + wrap(oldStream: 
ReadableStream): ReadableStream; + } + + export interface WritableStream extends EventEmitter { + writable: boolean; + write(buffer: Buffer, cb?: Function): boolean; + write(str: string, cb?: Function): boolean; + write(str: string, encoding?: string, cb?: Function): boolean; + end(): void; + end(buffer: Buffer, cb?: Function): void; + end(str: string, cb?: Function): void; + end(str: string, encoding?: string, cb?: Function): void; + } + + export interface ReadWriteStream extends ReadableStream, WritableStream {} + + export interface Process extends EventEmitter { + stdout: WritableStream; + stderr: WritableStream; + stdin: ReadableStream; + argv: string[]; + /** + * The process.execArgv property returns the set of Node.js-specific command-line options passed when the Node.js process was launched. These options do not appear in the array returned by the process.argv property, and do not include the Node.js executable, the name of the script, or any options following the script name. These options are useful in order to spawn child processes with the same execution environment as the parent. 
+ */ + execArgv: string[]; + execPath: string; + abort(): void; + chdir(directory: string): void; + cwd(): string; + env: { + PATH: string; + [key: string]: string; + }; + exit(code?: number): void; + getgid(): number; + setgid(id: number): void; + setgid(id: string): void; + getuid(): number; + setuid(id: number): void; + setuid(id: string): void; + version: string; + versions: { + http_parser: string; + node: string; + v8: string; + ares: string; + uv: string; + zlib: string; + openssl: string; + }; + config: { + target_defaults: { + cflags: any[]; + default_configuration: string; + defines: string[]; + include_dirs: string[]; + libraries: string[]; + }; + variables: { + clang: number; + host_arch: string; + node_install_npm: boolean; + node_install_waf: boolean; + node_prefix: string; + node_shared_openssl: boolean; + node_shared_v8: boolean; + node_shared_zlib: boolean; + node_use_dtrace: boolean; + node_use_etw: boolean; + node_use_openssl: boolean; + target_arch: string; + v8_no_strict_aliasing: number; + v8_use_snapshot: boolean; + visibility: string; + }; + }; + kill(pid:number, signal?: string|number): void; + pid: number; + title: string; + arch: string; + platform: string; + memoryUsage(): { rss: number; heapTotal: number; heapUsed: number; }; + nextTick(callback: Function): void; + umask(mask?: number): number; + uptime(): number; + hrtime(time?:number[]): number[]; + + // Worker + send?(message: any, sendHandle?: any): void; + } + + export interface Timer { + ref() : void; + unref() : void; + } +} + +/** + * @deprecated + */ +interface NodeBuffer { + [index: number]: number; + write(string: string, offset?: number, length?: number, encoding?: string): number; + toString(encoding?: string, start?: number, end?: number): string; + toJSON(): any; + length: number; + copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number; + slice(start?: number, end?: number): Buffer; + readUInt8(offset: number, noAssert?: 
boolean): number; + readUInt16LE(offset: number, noAssert?: boolean): number; + readUInt16BE(offset: number, noAssert?: boolean): number; + readUInt32LE(offset: number, noAssert?: boolean): number; + readUInt32BE(offset: number, noAssert?: boolean): number; + readInt8(offset: number, noAssert?: boolean): number; + readInt16LE(offset: number, noAssert?: boolean): number; + readInt16BE(offset: number, noAssert?: boolean): number; + readInt32LE(offset: number, noAssert?: boolean): number; + readInt32BE(offset: number, noAssert?: boolean): number; + readFloatLE(offset: number, noAssert?: boolean): number; + readFloatBE(offset: number, noAssert?: boolean): number; + readDoubleLE(offset: number, noAssert?: boolean): number; + readDoubleBE(offset: number, noAssert?: boolean): number; + writeUInt8(value: number, offset: number, noAssert?: boolean): void; + writeUInt16LE(value: number, offset: number, noAssert?: boolean): void; + writeUInt16BE(value: number, offset: number, noAssert?: boolean): void; + writeUInt32LE(value: number, offset: number, noAssert?: boolean): void; + writeUInt32BE(value: number, offset: number, noAssert?: boolean): void; + writeInt8(value: number, offset: number, noAssert?: boolean): void; + writeInt16LE(value: number, offset: number, noAssert?: boolean): void; + writeInt16BE(value: number, offset: number, noAssert?: boolean): void; + writeInt32LE(value: number, offset: number, noAssert?: boolean): void; + writeInt32BE(value: number, offset: number, noAssert?: boolean): void; + writeFloatLE(value: number, offset: number, noAssert?: boolean): void; + writeFloatBE(value: number, offset: number, noAssert?: boolean): void; + writeDoubleLE(value: number, offset: number, noAssert?: boolean): void; + writeDoubleBE(value: number, offset: number, noAssert?: boolean): void; + fill(value: any, offset?: number, end?: number): void; +} + +/************************************************ +* * +* MODULES * +* * +************************************************/ 
+declare module "buffer" { + export var INSPECT_MAX_BYTES: number; +} + +declare module "querystring" { + export function stringify(obj: any, sep?: string, eq?: string): string; + export function parse(str: string, sep?: string, eq?: string, options?: { maxKeys?: number; }): any; + export function escape(): any; + export function unescape(): any; +} + +declare module "events" { + export class EventEmitter implements NodeJS.EventEmitter { + static listenerCount(emitter: EventEmitter, event: string): number; + + addListener(event: string, listener: Function): this; + on(event: string, listener: Function): this; + once(event: string, listener: Function): this; + removeListener(event: string, listener: Function): this; + removeAllListeners(event?: string): this; + setMaxListeners(n: number): void; + listeners(event: string): Function[]; + emit(event: string, ...args: any[]): boolean; + } +} + +declare module "http" { + import events = require("events"); + import net = require("net"); + import stream = require("stream"); + + export interface OutgoingHeaders { + [header: string]: number | string | string[]; + } + + export interface IncomingHeaders { + [header: string]: string | string[]; + } + + export interface Server extends events.EventEmitter { + listen(port: number, hostname?: string, backlog?: number, callback?: Function): Server; + listen(path: string, callback?: Function): Server; + listen(handle: any, listeningListener?: Function): Server; + close(cb?: any): Server; + address(): { port: number; family: string; address: string; }; + maxHeadersCount: number; + } + + export interface IncomingMessage extends events.EventEmitter, stream.Readable { + httpVersion: string; + headers: IncomingHeaders; + rawHeaders: string[]; + trailers: IncomingHeaders; + rawTrailers: string[]; + setTimeout(msecs: number, callback: Function): NodeJS.Timer; + /** + * Only valid for request obtained from http.Server. 
+ */ + method?: string; + /** + * Only valid for request obtained from http.Server. + */ + url?: string; + /** + * Only valid for response obtained from http.ClientRequest. + */ + statusCode?: number; + /** + * Only valid for response obtained from http.ClientRequest. + */ + statusMessage?: string; + socket: net.Socket; + } + + export interface ServerResponse extends events.EventEmitter, stream.Writable { + // Extended base methods + write(buffer: Buffer): boolean; + write(buffer: Buffer, cb?: Function): boolean; + write(str: string, cb?: Function): boolean; + write(str: string, encoding?: string, cb?: Function): boolean; + write(str: string, encoding?: string, fd?: string): boolean; + + writeContinue(): void; + writeHead(statusCode: number, statusText?: string, headers?: OutgoingHeaders): void; + writeHead(statusCode: number, headers?: OutgoingHeaders): void; + statusCode: number; + setHeader(name: string, value: string): void; + sendDate: boolean; + getHeader(name: string): string; + removeHeader(name: string): void; + write(chunk: any, encoding?: string): any; + addTrailers(headers: OutgoingHeaders): void; + + // Extended base methods + end(): void; + end(buffer: Buffer, cb?: Function): void; + end(str: string, cb?: Function): void; + end(str: string, encoding?: string, cb?: Function): void; + } + + /** + * Object returned by http.request() + */ + export interface ClientRequest extends events.EventEmitter, NodeJS.WritableStream { + abort(): void; + setTimeout(timeout: number, callback?: Function): void; + setNoDelay(noDelay?: boolean): void; + setSocketKeepAlive(enable?: boolean, initialDelay?: number): void; + } + + export interface AgentOptions { + /** + * Keep sockets around in a pool to be used by other requests in the future. Default = false + */ + keepAlive?: boolean; + /** + * When using HTTP KeepAlive, how often to send TCP KeepAlive packets over sockets being kept alive. Default = 1000. + * Only relevant if keepAlive is set to true. 
+ */ + keepAliveMsecs?: number; + /** + * Maximum number of sockets to allow per host. Default for Node 0.10 is 5, default for Node 0.12 is Infinity + */ + maxSockets?: number; + /** + * Maximum number of sockets to leave open in a free state. Only relevant if keepAlive is set to true. Default = 256. + */ + maxFreeSockets?: number; + } + + export class Agent { + maxSockets: number; + sockets: any; + requests: any; + + constructor(opts?: AgentOptions); + + /** + * Destroy any sockets that are currently in use by the agent. + * It is usually not necessary to do this. However, if you are using an agent with KeepAlive enabled, + * then it is best to explicitly shut down the agent when you know that it will no longer be used. Otherwise, + * sockets may hang open for quite a long time before the server terminates them. + */ + destroy(): void; + } + + /** + * Options for http.request() + */ + export interface RequestOptions { + /** + * A domain name or IP address of the server to issue the request to. Defaults to 'localhost'. + */ + host?: string; + /** + * To support url.parse() hostname is preferred over host + */ + hostname?: string; + /** + * Port of remote server. Defaults to 80. + */ + port?: number | string; + /** + * Local interface to bind for network connections. + */ + localAddress?: string; + /** + * Unix Domain Socket (use one of host:port or socketPath) + */ + socketPath?: string; + /** + * A string specifying the HTTP request method. Defaults to 'GET'. + */ + method?: string; + /** + * Request path. Defaults to '/'. Should include query string if any. E.G. '/index.html?page=12' + */ + path?: string; + /** + * An object containing request headers. + */ + headers?: OutgoingHeaders; + /** + * Basic authentication i.e. 'user:password' to compute an Authorization header. + */ + auth?: string; + /** + * Controls Agent behavior. When an Agent is used request will default to Connection: keep-alive. 
Possible values: + * - undefined (default): use global Agent for this host and port. + * - Agent object: explicitly use the passed in Agent. + * - false: opts out of connection pooling with an Agent, defaults request to Connection: close. + */ + agent?: Agent | boolean; + } + + export var STATUS_CODES: { + [errorCode: number]: string; + [errorCode: string]: string; + }; + export function createServer(requestListener?: (request: IncomingMessage, response: ServerResponse) =>void ): Server; + export function createClient(port?: number, host?: string): any; + export function request(options: string | RequestOptions, callback?: (response: IncomingMessage) => void): ClientRequest; + export function get(options: string | RequestOptions, callback?: (response: IncomingMessage) => void): ClientRequest; + export var globalAgent: Agent; +} +declare module "cluster" { + import child = require("child_process"); + import events = require("events"); + + export interface ClusterSettings { + exec?: string; + args?: string[]; + silent?: boolean; + } + + export class Worker extends events.EventEmitter { + id: string; + process: child.ChildProcess; + suicide: boolean; + send(message: any, sendHandle?: any): void; + kill(signal?: string): void; + destroy(signal?: string): void; + disconnect(): void; + } + + export var settings: ClusterSettings; + export var isMaster: boolean; + export var isWorker: boolean; + export function setupMaster(settings?: ClusterSettings): void; + export function fork(env?: any): Worker; + export function disconnect(callback?: Function): void; + export var worker: Worker; + export var workers: Worker[]; + + // Event emitter + export function addListener(event: string, listener: Function): void; + export function on(event: string, listener: Function): any; + export function once(event: string, listener: Function): void; + export function removeListener(event: string, listener: Function): void; + export function removeAllListeners(event?: string): void; + export 
function setMaxListeners(n: number): void; + export function listeners(event: string): Function[]; + export function emit(event: string, ...args: any[]): boolean; +} + +declare module "zlib" { + import stream = require("stream"); + export interface ZlibOptions { chunkSize?: number; windowBits?: number; level?: number; memLevel?: number; strategy?: number; dictionary?: any; } + export interface ZlibCallback { (error: Error, result: any): void } + + export interface Gzip extends stream.Transform { } + export interface Gunzip extends stream.Transform { } + export interface Deflate extends stream.Transform { } + export interface Inflate extends stream.Transform { } + export interface DeflateRaw extends stream.Transform { } + export interface InflateRaw extends stream.Transform { } + export interface Unzip extends stream.Transform { } + + export function createGzip(options?: ZlibOptions): Gzip; + export function createGunzip(options?: ZlibOptions): Gunzip; + export function createDeflate(options?: ZlibOptions): Deflate; + export function createInflate(options?: ZlibOptions): Inflate; + export function createDeflateRaw(options?: ZlibOptions): DeflateRaw; + export function createInflateRaw(options?: ZlibOptions): InflateRaw; + export function createUnzip(options?: ZlibOptions): Unzip; + + export function deflate(buf: Buffer | string, callback: ZlibCallback): void; + export function deflateRaw(buf: Buffer | string, callback: ZlibCallback): void; + export function gzip(buf: Buffer | string, callback: ZlibCallback): void; + export function gunzip(buf: Buffer | string, callback: ZlibCallback): void; + export function inflate(buf: Buffer | string, callback: ZlibCallback): void; + export function inflateRaw(buf: Buffer | string, callback: ZlibCallback): void; + export function unzip(buf: Buffer | string, callback: ZlibCallback): void; + + // Constants + export var Z_NO_FLUSH: number; + export var Z_PARTIAL_FLUSH: number; + export var Z_SYNC_FLUSH: number; + export var 
Z_FULL_FLUSH: number; + export var Z_FINISH: number; + export var Z_BLOCK: number; + export var Z_TREES: number; + export var Z_OK: number; + export var Z_STREAM_END: number; + export var Z_NEED_DICT: number; + export var Z_ERRNO: number; + export var Z_STREAM_ERROR: number; + export var Z_DATA_ERROR: number; + export var Z_MEM_ERROR: number; + export var Z_BUF_ERROR: number; + export var Z_VERSION_ERROR: number; + export var Z_NO_COMPRESSION: number; + export var Z_BEST_SPEED: number; + export var Z_BEST_COMPRESSION: number; + export var Z_DEFAULT_COMPRESSION: number; + export var Z_FILTERED: number; + export var Z_HUFFMAN_ONLY: number; + export var Z_RLE: number; + export var Z_FIXED: number; + export var Z_DEFAULT_STRATEGY: number; + export var Z_BINARY: number; + export var Z_TEXT: number; + export var Z_ASCII: number; + export var Z_UNKNOWN: number; + export var Z_DEFLATED: number; + export var Z_NULL: number; +} + +declare module "os" { + export function tmpdir(): string; + export function hostname(): string; + export function type(): string; + export function platform(): string; + export function arch(): string; + export function release(): string; + export function uptime(): number; + export function loadavg(): number[]; + export function totalmem(): number; + export function freemem(): number; + export function cpus(): { model: string; speed: number; times: { user: number; nice: number; sys: number; idle: number; irq: number; }; }[]; + export function networkInterfaces(): any; + export var EOL: string; +} + +declare module "https" { + import tls = require("tls"); + import events = require("events"); + import http = require("http"); + + export interface ServerOptions { + pfx?: any; + key?: any; + passphrase?: string; + cert?: any; + ca?: any; + crl?: any; + ciphers?: string; + honorCipherOrder?: boolean; + requestCert?: boolean; + rejectUnauthorized?: boolean; + NPNProtocols?: any; + SNICallback?: (servername: string) => any; + } + + export interface 
RequestOptions extends http.RequestOptions { + pfx?: string | Buffer; + key?: string | Buffer; + passphrase?: string; + cert?: string | Buffer; + ca?: string | Buffer | Array; + ciphers?: string; + rejectUnauthorized?: boolean; + } + + export interface Agent { + maxSockets: number; + sockets: any; + requests: any; + } + + export var Agent: { + new (options?: RequestOptions): Agent; + }; + + export interface Server extends tls.Server { } + export function createServer(options: ServerOptions, requestListener?: Function): Server; + export function request(options: string | RequestOptions, callback?: (res: http.IncomingMessage) =>void): http.ClientRequest; + export function get(options: string | RequestOptions, callback?: (res: http.IncomingMessage) =>void): http.ClientRequest; + export var globalAgent: Agent; +} + +declare module "punycode" { + export function decode(string: string): string; + export function encode(string: string): string; + export function toUnicode(domain: string): string; + export function toASCII(domain: string): string; + export var ucs2: ucs2; + interface ucs2 { + decode(string: string): string; + encode(codePoints: number[]): string; + } + export var version: any; +} + +declare module "repl" { + import stream = require("stream"); + import events = require("events"); + + export interface ReplOptions { + prompt?: string; + input?: NodeJS.ReadableStream; + output?: NodeJS.WritableStream; + terminal?: boolean; + eval?: Function; + useColors?: boolean; + useGlobal?: boolean; + ignoreUndefined?: boolean; + writer?: Function; + } + export function start(options: ReplOptions): events.EventEmitter; +} + +declare module "readline" { + import events = require("events"); + import stream = require("stream"); + + export interface ReadLine extends events.EventEmitter { + setPrompt(prompt: string, length: number): void; + prompt(preserveCursor?: boolean): void; + question(query: string, callback: Function): void; + pause(): void; + resume(): void; + close(): 
void; + write(data: any, key?: any): void; + } + export interface ReadLineOptions { + input: NodeJS.ReadableStream; + output: NodeJS.WritableStream; + completer?: Function; + terminal?: boolean; + } + export function createInterface(options: ReadLineOptions): ReadLine; +} + +declare module "vm" { + export interface Context { } + export interface Script { + runInThisContext(): void; + runInNewContext(sandbox?: Context): void; + } + export function runInThisContext(code: string, filename?: string): void; + export function runInNewContext(code: string, sandbox?: Context, filename?: string): void; + export function runInContext(code: string, context: Context, filename?: string): void; + export function createContext(initSandbox?: Context): Context; + export function createScript(code: string, filename?: string): Script; +} + +declare module "child_process" { + import events = require("events"); + import stream = require("stream"); + + export interface ChildProcess extends events.EventEmitter { + stdin: stream.Writable; + stdout: stream.Readable; + stderr: stream.Readable; + pid: number; + kill(signal?: string): void; + send(message: any, sendHandle: any): void; + connected: boolean; + disconnect(): void; + } + + export function spawn(command: string, args?: string[], options?: { + cwd?: string; + stdio?: any; + custom?: any; + env?: any; + detached?: boolean; + }): ChildProcess; + export function exec(command: string, options: { + cwd?: string; + stdio?: any; + customFds?: any; + env?: any; + encoding?: string; + timeout?: number; + maxBuffer?: number; + killSignal?: string; + }, callback: (error: Error, stdout: Buffer, stderr: Buffer) =>void ): ChildProcess; + export function exec(command: string, callback: (error: Error, stdout: Buffer, stderr: Buffer) =>void ): ChildProcess; + export function execFile(file: string, + callback?: (error: Error, stdout: Buffer, stderr: Buffer) =>void ): ChildProcess; + export function execFile(file: string, args?: string[], + 
callback?: (error: Error, stdout: Buffer, stderr: Buffer) =>void ): ChildProcess; + export function execFile(file: string, args?: string[], options?: { + cwd?: string; + stdio?: any; + customFds?: any; + env?: any; + encoding?: string; + timeout?: number; + maxBuffer?: number; + killSignal?: string; + }, callback?: (error: Error, stdout: Buffer, stderr: Buffer) =>void ): ChildProcess; + export function fork(modulePath: string, args?: string[], options?: { + cwd?: string; + env?: any; + encoding?: string; + }): ChildProcess; +} + +declare module "url" { + export interface Url { + href?: string; + protocol?: string; + auth?: string; + hostname?: string; + port?: string; + host?: string; + pathname?: string; + search?: string; + query?: string | any; + slashes?: boolean; + hash?: string; + path?: string; + } + + export function parse(urlStr: string, parseQueryString?: boolean , slashesDenoteHost?: boolean ): Url; + export function format(url: Url): string; + export function resolve(from: string, to: string): string; +} + +declare module "dns" { + export function lookup(domain: string, family: number, callback: (err: Error, address: string, family: number) =>void ): string; + export function lookup(domain: string, callback: (err: Error, address: string, family: number) =>void ): string; + export function resolve(domain: string, rrtype: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolve(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolve4(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolve6(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolveMx(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolveTxt(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolveSrv(domain: 
string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolveNs(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function resolveCname(domain: string, callback: (err: Error, addresses: string[]) =>void ): string[]; + export function reverse(ip: string, callback: (err: Error, domains: string[]) =>void ): string[]; +} + +declare module "net" { + import stream = require("stream"); + + export interface Socket extends stream.Duplex { + // Extended base methods + write(buffer: Buffer): boolean; + write(buffer: Buffer, cb?: Function): boolean; + write(str: string, cb?: Function): boolean; + write(str: string, encoding?: string, cb?: Function): boolean; + write(str: string, encoding?: string, fd?: string): boolean; + + connect(port: number, host?: string, connectionListener?: Function): void; + connect(path: string, connectionListener?: Function): void; + bufferSize: number; + setEncoding(encoding?: string): void; + write(data: any, encoding?: string, callback?: Function): void; + destroy(): void; + pause(): void; + resume(): void; + setTimeout(timeout: number, callback?: Function): void; + setNoDelay(noDelay?: boolean): void; + setKeepAlive(enable?: boolean, initialDelay?: number): void; + address(): { port: number; family: string; address: string; }; + unref(): void; + ref(): void; + + remoteAddress: string; + remotePort: number; + bytesRead: number; + bytesWritten: number; + + // Extended base methods + end(): void; + end(buffer: Buffer, cb?: Function): void; + end(str: string, cb?: Function): void; + end(str: string, encoding?: string, cb?: Function): void; + end(data?: any, encoding?: string): void; + } + + export var Socket: { + new (options?: { fd?: string; type?: string; allowHalfOpen?: boolean; }): Socket; + }; + + export interface Server extends Socket { + listen(port: number, host?: string, backlog?: number, listeningListener?: Function): Server; + listen(path: string, 
listeningListener?: Function): Server; + listen(handle: any, listeningListener?: Function): Server; + close(callback?: Function): Server; + address(): { port: number; family: string; address: string; }; + maxConnections: number; + connections: number; + } + export function createServer(connectionListener?: (socket: Socket) =>void ): Server; + export function createServer(options?: { allowHalfOpen?: boolean; }, connectionListener?: (socket: Socket) =>void ): Server; + export function connect(options: { port: number, host?: string, localAddress? : string, allowHalfOpen?: boolean; }, connectionListener?: Function): Socket; + export function connect(port: number, host?: string, connectionListener?: Function): Socket; + export function connect(path: string, connectionListener?: Function): Socket; + export function createConnection(options: { port: number, host?: string, localAddress? : string, allowHalfOpen?: boolean; }, connectionListener?: Function): Socket; + export function createConnection(port: number, host?: string, connectionListener?: Function): Socket; + export function createConnection(path: string, connectionListener?: Function): Socket; + export function isIP(input: string): number; + export function isIPv4(input: string): boolean; + export function isIPv6(input: string): boolean; +} + +declare module "dgram" { + import events = require("events"); + + interface RemoteInfo { + address: string; + port: number; + size: number; + } + + interface AddressInfo { + address: string; + family: string; + port: number; + } + + export function createSocket(type: string, callback?: (msg: Buffer, rinfo: RemoteInfo) => void): Socket; + + interface Socket extends events.EventEmitter { + send(buf: Buffer, offset: number, length: number, port: number, address: string, callback?: (error: Error, bytes: number) => void): void; + bind(port: number, address?: string, callback?: () => void): void; + close(): void; + address(): AddressInfo; + setBroadcast(flag: boolean): void; + 
setMulticastTTL(ttl: number): void; + setMulticastLoopback(flag: boolean): void; + addMembership(multicastAddress: string, multicastInterface?: string): void; + dropMembership(multicastAddress: string, multicastInterface?: string): void; + } +} + +declare module "fs" { + import stream = require("stream"); + import events = require("events"); + + interface Stats { + isFile(): boolean; + isDirectory(): boolean; + isBlockDevice(): boolean; + isCharacterDevice(): boolean; + isSymbolicLink(): boolean; + isFIFO(): boolean; + isSocket(): boolean; + dev: number; + ino: number; + mode: number; + nlink: number; + uid: number; + gid: number; + rdev: number; + size: number; + blksize: number; + blocks: number; + atime: Date; + mtime: Date; + ctime: Date; + } + + interface FSWatcher extends events.EventEmitter { + close(): void; + } + + export interface ReadStream extends stream.Readable { + close(): void; + } + export interface WriteStream extends stream.Writable { + close(): void; + bytesWritten: number; + } + + export function rename(oldPath: string, newPath: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function renameSync(oldPath: string, newPath: string): void; + export function truncate(path: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function truncate(path: string, len: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function truncateSync(path: string, len?: number): void; + export function ftruncate(fd: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function ftruncate(fd: number, len: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function ftruncateSync(fd: number, len?: number): void; + export function chown(path: string, uid: number, gid: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function chownSync(path: string, uid: number, gid: number): void; + export function fchown(fd: number, uid: number, gid: 
number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function fchownSync(fd: number, uid: number, gid: number): void; + export function lchown(path: string, uid: number, gid: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function lchownSync(path: string, uid: number, gid: number): void; + export function chmod(path: string, mode: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function chmod(path: string, mode: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function chmodSync(path: string, mode: number): void; + export function chmodSync(path: string, mode: string): void; + export function fchmod(fd: number, mode: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function fchmod(fd: number, mode: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function fchmodSync(fd: number, mode: number): void; + export function fchmodSync(fd: number, mode: string): void; + export function lchmod(path: string, mode: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function lchmod(path: string, mode: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function lchmodSync(path: string, mode: number): void; + export function lchmodSync(path: string, mode: string): void; + export function stat(path: string, callback?: (err: NodeJS.ErrnoException, stats: Stats) => any): void; + export function lstat(path: string, callback?: (err: NodeJS.ErrnoException, stats: Stats) => any): void; + export function fstat(fd: number, callback?: (err: NodeJS.ErrnoException, stats: Stats) => any): void; + export function statSync(path: string): Stats; + export function lstatSync(path: string): Stats; + export function fstatSync(fd: number): Stats; + export function link(srcpath: string, dstpath: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function linkSync(srcpath: string, 
dstpath: string): void; + export function symlink(srcpath: string, dstpath: string, type?: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function symlinkSync(srcpath: string, dstpath: string, type?: string): void; + export function readlink(path: string, callback?: (err: NodeJS.ErrnoException, linkString: string) => any): void; + export function readlinkSync(path: string): string; + export function realpath(path: string, callback?: (err: NodeJS.ErrnoException, resolvedPath: string) => any): void; + export function realpath(path: string, cache: {[path: string]: string}, callback: (err: NodeJS.ErrnoException, resolvedPath: string) =>any): void; + export function realpathSync(path: string, cache?: {[path: string]: string}): string; + export function unlink(path: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function unlinkSync(path: string): void; + export function rmdir(path: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function rmdirSync(path: string): void; + export function mkdir(path: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function mkdir(path: string, mode: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function mkdir(path: string, mode: string, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function mkdirSync(path: string, mode?: number): void; + export function mkdirSync(path: string, mode?: string): void; + export function readdir(path: string, callback?: (err: NodeJS.ErrnoException, files: string[]) => void): void; + export function readdirSync(path: string): string[]; + export function close(fd: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function closeSync(fd: number): void; + export function open(path: string, flags: string, callback?: (err: NodeJS.ErrnoException, fd: number) => any): void; + export function open(path: string, flags: string, mode: number, 
callback?: (err: NodeJS.ErrnoException, fd: number) => any): void; + export function open(path: string, flags: string, mode: string, callback?: (err: NodeJS.ErrnoException, fd: number) => any): void; + export function openSync(path: string, flags: string, mode?: number): number; + export function openSync(path: string, flags: string, mode?: string): number; + export function utimes(path: string, atime: number, mtime: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function utimes(path: string, atime: Date, mtime: Date, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function utimesSync(path: string, atime: number, mtime: number): void; + export function utimesSync(path: string, atime: Date, mtime: Date): void; + export function futimes(fd: number, atime: number, mtime: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function futimes(fd: number, atime: Date, mtime: Date, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function futimesSync(fd: number, atime: number, mtime: number): void; + export function futimesSync(fd: number, atime: Date, mtime: Date): void; + export function fsync(fd: number, callback?: (err?: NodeJS.ErrnoException) => void): void; + export function fsyncSync(fd: number): void; + export function write(fd: number, buffer: Buffer, offset: number, length: number, position: number, callback?: (err: NodeJS.ErrnoException, written: number, buffer: Buffer) => void): void; + export function writeSync(fd: number, buffer: Buffer, offset: number, length: number, position: number): number; + export function read(fd: number, buffer: Buffer, offset: number, length: number, position: number, callback?: (err: NodeJS.ErrnoException, bytesRead: number, buffer: Buffer) => void): void; + export function readSync(fd: number, buffer: Buffer, offset: number, length: number, position: number): number; + export function readFile(filename: string, encoding: string, callback: (err: 
NodeJS.ErrnoException, data: string) => void): void; + export function readFile(filename: string, options: { encoding: string; flag?: string; }, callback: (err: NodeJS.ErrnoException, data: string) => void): void; + export function readFile(filename: string, options: { flag?: string; }, callback: (err: NodeJS.ErrnoException, data: Buffer) => void): void; + export function readFile(filename: string, callback: (err: NodeJS.ErrnoException, data: Buffer) => void ): void; + export function readFileSync(filename: string, encoding: string): string; + export function readFileSync(filename: string, options: { encoding: string; flag?: string; }): string; + export function readFileSync(filename: string, options?: { flag?: string; }): Buffer; + export function writeFile(filename: string, data: any, callback?: (err: NodeJS.ErrnoException) => void): void; + export function writeFile(filename: string, data: any, options: { encoding?: string; mode?: number; flag?: string; }, callback?: (err: NodeJS.ErrnoException) => void): void; + export function writeFile(filename: string, data: any, options: { encoding?: string; mode?: string; flag?: string; }, callback?: (err: NodeJS.ErrnoException) => void): void; + export function writeFileSync(filename: string, data: any, options?: { encoding?: string; mode?: number; flag?: string; }): void; + export function writeFileSync(filename: string, data: any, options?: { encoding?: string; mode?: string; flag?: string; }): void; + export function appendFile(filename: string, data: any, options: { encoding?: string; mode?: number; flag?: string; }, callback?: (err: NodeJS.ErrnoException) => void): void; + export function appendFile(filename: string, data: any, options: { encoding?: string; mode?: string; flag?: string; }, callback?: (err: NodeJS.ErrnoException) => void): void; + export function appendFile(filename: string, data: any, callback?: (err: NodeJS.ErrnoException) => void): void; + export function appendFileSync(filename: string, data: any, 
options?: { encoding?: string; mode?: number; flag?: string; }): void; + export function appendFileSync(filename: string, data: any, options?: { encoding?: string; mode?: string; flag?: string; }): void; + export function watchFile(filename: string, listener: (curr: Stats, prev: Stats) => void): void; + export function watchFile(filename: string, options: { persistent?: boolean; interval?: number; }, listener: (curr: Stats, prev: Stats) => void): void; + export function unwatchFile(filename: string, listener?: (curr: Stats, prev: Stats) => void): void; + export function watch(filename: string, listener?: (event: string, filename: string) => any): FSWatcher; + export function watch(filename: string, options: { persistent?: boolean; }, listener?: (event: string, filename: string) => any): FSWatcher; + export function exists(path: string, callback?: (exists: boolean) => void): void; + export function existsSync(path: string): boolean; + export function createReadStream(path: string, options?: { + flags?: string; + encoding?: string; + fd?: string; + mode?: number; + bufferSize?: number; + }): ReadStream; + export function createReadStream(path: string, options?: { + flags?: string; + encoding?: string; + fd?: string; + mode?: string; + bufferSize?: number; + }): ReadStream; + export function createWriteStream(path: string, options?: { + flags?: string; + encoding?: string; + string?: string; + }): WriteStream; +} + +declare module "path" { + export function normalize(p: string): string; + export function join(...paths: string[]): string; + export function resolve(...pathSegments: string[]): string; + export function relative(from: string, to: string): string; + export function dirname(p: string): string; + export function basename(p: string, ext?: string): string; + export function extname(p: string): string; + export var sep: string; +} + +declare module "string_decoder" { + export interface NodeStringDecoder { + write(buffer: Buffer): string; + 
detectIncompleteChar(buffer: Buffer): number; + } + export var StringDecoder: { + new (encoding: string): NodeStringDecoder; + }; +} + +declare module "tls" { + import crypto = require("crypto"); + import net = require("net"); + import stream = require("stream"); + + var CLIENT_RENEG_LIMIT: number; + var CLIENT_RENEG_WINDOW: number; + + export interface TlsOptions { + pfx?: string | Buffer; + key?: string | Buffer; + passphrase?: string; + cert?: string | Buffer; + ca?: string | Buffer | Array; + crl?: string | string[]; + ciphers?: string; + honorCipherOrder?: any; + requestCert?: boolean; + rejectUnauthorized?: boolean; + NPNProtocols?: Array; + SNICallback?: (servername: string) => any; + } + + export interface ConnectionOptions { + host?: string; + port?: number | string; + socket?: net.Socket; + pfx?: string | Buffer; + key?: string | Buffer; + passphrase?: string; + cert?: string | Buffer; + ca?: string | Buffer | Array; + rejectUnauthorized?: boolean; + NPNProtocols?: Array; + servername?: string; + } + + export interface Server extends net.Server { + // Extended base methods + listen(port: number, host?: string, backlog?: number, listeningListener?: Function): Server; + listen(path: string, listeningListener?: Function): Server; + listen(handle: any, listeningListener?: Function): Server; + + listen(port: number, host?: string, callback?: Function): Server; + close(): Server; + address(): { port: number; family: string; address: string; }; + addContext(hostName: string, credentials: { + key: string; + cert: string; + ca: string; + }): void; + maxConnections: number; + connections: number; + } + + export interface ClearTextStream extends stream.Duplex { + authorized: boolean; + authorizationError: Error; + getPeerCertificate(): any; + getCipher: { + name: string; + version: string; + }; + address: { + port: number; + family: string; + address: string; + }; + remoteAddress: string; + remotePort: number; + } + + export interface SecurePair { + encrypted: any; 
+ cleartext: any; + } + + export function createServer(options: TlsOptions, secureConnectionListener?: (cleartextStream: ClearTextStream) =>void ): Server; + export function connect(options: TlsOptions, secureConnectionListener?: () =>void ): ClearTextStream; + export function connect(port: number, host?: string, options?: ConnectionOptions, secureConnectListener?: () =>void ): ClearTextStream; + export function connect(port: number, options?: ConnectionOptions, secureConnectListener?: () =>void ): ClearTextStream; + export function createSecurePair(credentials?: crypto.Credentials, isServer?: boolean, requestCert?: boolean, rejectUnauthorized?: boolean): SecurePair; +} + +declare module "crypto" { + export interface CredentialDetails { + pfx: string; + key: string; + passphrase: string; + cert: string; + ca: any; //string | string array + crl: any; //string | string array + ciphers: string; + } + export interface Credentials { context?: any; } + export function createCredentials(details: CredentialDetails): Credentials; + export function createHash(algorithm: string): Hash; + export function createHmac(algorithm: string, key: string): Hmac; + export function createHmac(algorithm: string, key: Buffer): Hmac; + interface Hash { + update(data: any, input_encoding?: string): Hash; + digest(encoding: 'buffer'): Buffer; + digest(encoding: string): any; + digest(): Buffer; + } + interface Hmac { + update(data: any, input_encoding?: string): Hmac; + digest(encoding: 'buffer'): Buffer; + digest(encoding: string): any; + digest(): Buffer; + } + export function createCipher(algorithm: string, password: any): Cipher; + export function createCipheriv(algorithm: string, key: any, iv: any): Cipher; + interface Cipher { + update(data: Buffer): Buffer; + update(data: string, input_encoding?: string, output_encoding?: string): string; + final(): Buffer; + final(output_encoding: string): string; + setAutoPadding(auto_padding: boolean): void; + } + export function 
createDecipher(algorithm: string, password: any): Decipher; + export function createDecipheriv(algorithm: string, key: any, iv: any): Decipher; + interface Decipher { + update(data: Buffer): Buffer; + update(data: string, input_encoding?: string, output_encoding?: string): string; + final(): Buffer; + final(output_encoding: string): string; + setAutoPadding(auto_padding: boolean): void; + } + export function createSign(algorithm: string): Signer; + interface Signer { + update(data: any): void; + sign(private_key: string, output_format: string): string; + } + export function createVerify(algorith: string): Verify; + interface Verify { + update(data: any): void; + verify(object: string, signature: string, signature_format?: string): boolean; + } + export function createDiffieHellman(prime_length: number): DiffieHellman; + export function createDiffieHellman(prime: number, encoding?: string): DiffieHellman; + interface DiffieHellman { + generateKeys(encoding?: string): string; + computeSecret(other_public_key: string, input_encoding?: string, output_encoding?: string): string; + getPrime(encoding?: string): string; + getGenerator(encoding: string): string; + getPublicKey(encoding?: string): string; + getPrivateKey(encoding?: string): string; + setPublicKey(public_key: string, encoding?: string): void; + setPrivateKey(public_key: string, encoding?: string): void; + } + export function getDiffieHellman(group_name: string): DiffieHellman; + export function pbkdf2(password: string|Buffer, salt: string|Buffer, iterations: number, keylen: number, callback: (err: Error, derivedKey: Buffer) => any): void; + export function pbkdf2Sync(password: string|Buffer, salt: string|Buffer, iterations: number, keylen: number) : Buffer; + export function randomBytes(size: number): Buffer; + export function randomBytes(size: number, callback: (err: Error, buf: Buffer) =>void ): void; + export function pseudoRandomBytes(size: number): Buffer; + export function pseudoRandomBytes(size: 
number, callback: (err: Error, buf: Buffer) =>void ): void; +} + +declare module "stream" { + import events = require("events"); + + export interface Stream extends events.EventEmitter { + pipe(destination: T, options?: { end?: boolean; }): T; + } + + export interface ReadableOptions { + highWaterMark?: number; + encoding?: string; + objectMode?: boolean; + } + + export class Readable extends events.EventEmitter implements NodeJS.ReadableStream { + readable: boolean; + constructor(opts?: ReadableOptions); + _read(size: number): void; + read(size?: number): any; + setEncoding(encoding: string): void; + pause(): void; + resume(): void; + pipe(destination: T, options?: { end?: boolean; }): T; + unpipe(destination?: T): void; + unshift(chunk: string): void; + unshift(chunk: Buffer): void; + wrap(oldStream: NodeJS.ReadableStream): NodeJS.ReadableStream; + push(chunk: any, encoding?: string): boolean; + } + + export interface WritableOptions { + highWaterMark?: number; + decodeStrings?: boolean; + objectMode?: boolean; + } + + export class Writable extends events.EventEmitter implements NodeJS.WritableStream { + writable: boolean; + constructor(opts?: WritableOptions); + _write(data: Buffer, encoding: string, callback: Function): void; + _write(data: string, encoding: string, callback: Function): void; + write(buffer: Buffer, cb?: Function): boolean; + write(str: string, cb?: Function): boolean; + write(str: string, encoding?: string, cb?: Function): boolean; + end(): void; + end(buffer: Buffer, cb?: Function): void; + end(str: string, cb?: Function): void; + end(str: string, encoding?: string, cb?: Function): void; + } + + export interface DuplexOptions extends ReadableOptions, WritableOptions { + allowHalfOpen?: boolean; + } + + // Note: Duplex extends both Readable and Writable. 
+ export class Duplex extends Readable implements NodeJS.ReadWriteStream { + writable: boolean; + constructor(opts?: DuplexOptions); + _write(data: Buffer, encoding: string, callback: Function): void; + _write(data: string, encoding: string, callback: Function): void; + write(buffer: Buffer, cb?: Function): boolean; + write(str: string, cb?: Function): boolean; + write(str: string, encoding?: string, cb?: Function): boolean; + end(): void; + end(buffer: Buffer, cb?: Function): void; + end(str: string, cb?: Function): void; + end(str: string, encoding?: string, cb?: Function): void; + } + + export interface TransformOptions extends ReadableOptions, WritableOptions {} + + // Note: Transform lacks the _read and _write methods of Readable/Writable. + export class Transform extends events.EventEmitter implements NodeJS.ReadWriteStream { + readable: boolean; + writable: boolean; + constructor(opts?: TransformOptions); + _transform(chunk: Buffer, encoding: string, callback: Function): void; + _transform(chunk: string, encoding: string, callback: Function): void; + _flush(callback: Function): void; + read(size?: number): any; + setEncoding(encoding: string): void; + pause(): void; + resume(): void; + pipe(destination: T, options?: { end?: boolean; }): T; + unpipe(destination?: T): void; + unshift(chunk: string): void; + unshift(chunk: Buffer): void; + wrap(oldStream: NodeJS.ReadableStream): NodeJS.ReadableStream; + push(chunk: any, encoding?: string): boolean; + write(buffer: Buffer, cb?: Function): boolean; + write(str: string, cb?: Function): boolean; + write(str: string, encoding?: string, cb?: Function): boolean; + end(): void; + end(buffer: Buffer, cb?: Function): void; + end(str: string, cb?: Function): void; + end(str: string, encoding?: string, cb?: Function): void; + } + + export class PassThrough extends Transform {} +} + +declare module "util" { + export interface InspectOptions { + showHidden?: boolean; + depth?: number; + colors?: boolean; + customInspect?: 
boolean; + } + + export function format(format: any, ...param: any[]): string; + export function debug(string: string): void; + export function error(...param: any[]): void; + export function puts(...param: any[]): void; + export function print(...param: any[]): void; + export function log(string: string): void; + export function inspect(object: any, showHidden?: boolean, depth?: number, color?: boolean): string; + export function inspect(object: any, options: InspectOptions): string; + export function isArray(object: any): boolean; + export function isRegExp(object: any): boolean; + export function isDate(object: any): boolean; + export function isError(object: any): boolean; + export function inherits(constructor: any, superConstructor: any): void; +} + +declare module "assert" { + function internal (value: any, message?: string): void; + namespace internal { + export class AssertionError implements Error { + name: string; + message: string; + actual: any; + expected: any; + operator: string; + generatedMessage: boolean; + + constructor(options?: {message?: string; actual?: any; expected?: any; + operator?: string; stackStartFunction?: Function}); + } + + export function fail(actual?: any, expected?: any, message?: string, operator?: string): void; + export function ok(value: any, message?: string): void; + export function equal(actual: any, expected: any, message?: string): void; + export function notEqual(actual: any, expected: any, message?: string): void; + export function deepEqual(actual: any, expected: any, message?: string): void; + export function notDeepEqual(acutal: any, expected: any, message?: string): void; + export function strictEqual(actual: any, expected: any, message?: string): void; + export function notStrictEqual(actual: any, expected: any, message?: string): void; + export var throws: { + (block: Function, message?: string): void; + (block: Function, error: Function, message?: string): void; + (block: Function, error: RegExp, message?: 
string): void; + (block: Function, error: (err: any) => boolean, message?: string): void; + }; + + export var doesNotThrow: { + (block: Function, message?: string): void; + (block: Function, error: Function, message?: string): void; + (block: Function, error: RegExp, message?: string): void; + (block: Function, error: (err: any) => boolean, message?: string): void; + }; + + export function ifError(value: any): void; + } + + export = internal; +} + +declare module "tty" { + import net = require("net"); + + export function isatty(fd: number): boolean; + export interface ReadStream extends net.Socket { + isRaw: boolean; + setRawMode(mode: boolean): void; + } + export interface WriteStream extends net.Socket { + columns: number; + rows: number; + } +} + +declare module "domain" { + import events = require("events"); + + export class Domain extends events.EventEmitter { + run(fn: Function): void; + add(emitter: events.EventEmitter): void; + remove(emitter: events.EventEmitter): void; + bind(cb: (err: Error, data: any) => any): any; + intercept(cb: (data: any) => any): any; + dispose(): void; + } + + export function create(): Domain; +} + +declare module "module" { + class Module implements NodeModule { + static runMain (): void; + static wrap (code: string): string; + static _nodeModulePaths (path: string): string[]; + static _load (request: string, parent?: Module, isMain?: boolean): any; + static _resolveFilename (request: string, parent?: Module, isMain?: boolean): string; + static _extensions: { [ext: string]: (m: Module, fileName: string) => any } + + constructor (filename: string); + + id: string; + parent: Module; + filename: string; + paths: string[]; + children: Module[]; + exports: any; + loaded: boolean; + require: NodeRequireFunction; + } + + export = Module; +} diff --git a/src/node_modules/azure-storage/typings/globals/node/typings.json b/src/node_modules/azure-storage/typings/globals/node/typings.json new file mode 100644 index 0000000..a63a7a6 --- 
/dev/null +++ b/src/node_modules/azure-storage/typings/globals/node/typings.json @@ -0,0 +1,13 @@ +{ + "resolution": "main", + "tree": { + "src": "https://raw.githubusercontent.com/types/env-node/959285e4da295481cf634f7d11f6ccccc863e430/0.10/typings.json", + "raw": "registry:env/node#0.10.0+20160918225031", + "version": "0.10", + "files": [ + "node.d.ts" + ], + "name": "node", + "type": "typings" + } +} diff --git a/src/node_modules/azure-storage/typings/index.d.ts b/src/node_modules/azure-storage/typings/index.d.ts new file mode 100644 index 0000000..3fef1c4 --- /dev/null +++ b/src/node_modules/azure-storage/typings/index.d.ts @@ -0,0 +1 @@ +/// diff --git a/src/node_modules/bcrypt-pbkdf/CONTRIBUTING.md b/src/node_modules/bcrypt-pbkdf/CONTRIBUTING.md new file mode 100644 index 0000000..401d34e --- /dev/null +++ b/src/node_modules/bcrypt-pbkdf/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing + +This repository uses [cr.joyent.us](https://cr.joyent.us) (Gerrit) for new +changes. Anyone can submit changes. To get started, see the [cr.joyent.us user +guide](https://github.com/joyent/joyent-gerrit/blob/master/docs/user/README.md). +This repo does not use GitHub pull requests. + +See the [Joyent Engineering +Guidelines](https://github.com/joyent/eng/blob/master/docs/index.md) for general +best practices expected in this repository. + +If you're changing something non-trivial or user-facing, you may want to submit +an issue first. diff --git a/src/node_modules/bcrypt-pbkdf/LICENSE b/src/node_modules/bcrypt-pbkdf/LICENSE new file mode 100644 index 0000000..fc58d2a --- /dev/null +++ b/src/node_modules/bcrypt-pbkdf/LICENSE @@ -0,0 +1,66 @@ +The Blowfish portions are under the following license: + +Blowfish block cipher for OpenBSD +Copyright 1997 Niels Provos +All rights reserved. + +Implementation advice by David Mazieres . + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The bcrypt_pbkdf portions are under the following license: + +Copyright (c) 2013 Ted Unangst + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + + +Performance improvements (Javascript-specific): + +Copyright 2016, Joyent Inc +Author: Alex Wilson + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/src/node_modules/bcrypt-pbkdf/README.md b/src/node_modules/bcrypt-pbkdf/README.md new file mode 100644 index 0000000..7551f33 --- /dev/null +++ b/src/node_modules/bcrypt-pbkdf/README.md @@ -0,0 +1,45 @@ +Port of the OpenBSD `bcrypt_pbkdf` function to pure Javascript. `npm`-ified +version of [Devi Mandiri's port](https://github.com/devi/tmp/blob/master/js/bcrypt_pbkdf.js), +with some minor performance improvements. The code is copied verbatim (and +un-styled) from Devi's work. + +This product includes software developed by Niels Provos. + +## API + +### `bcrypt_pbkdf.pbkdf(pass, passlen, salt, saltlen, key, keylen, rounds)` + +Derive a cryptographic key of arbitrary length from a given password and salt, +using the OpenBSD `bcrypt_pbkdf` function. This is a combination of Blowfish and +SHA-512. 
+ +See [this article](http://www.tedunangst.com/flak/post/bcrypt-pbkdf) for +further information. + +Parameters: + + * `pass`, a Uint8Array of length `passlen` + * `passlen`, an integer Number + * `salt`, a Uint8Array of length `saltlen` + * `saltlen`, an integer Number + * `key`, a Uint8Array of length `keylen`, will be filled with output + * `keylen`, an integer Number + * `rounds`, an integer Number, number of rounds of the PBKDF to run + +### `bcrypt_pbkdf.hash(sha2pass, sha2salt, out)` + +Calculate a Blowfish hash, given SHA2-512 output of a password and salt. Used as +part of the inner round function in the PBKDF. + +Parameters: + + * `sha2pass`, a Uint8Array of length 64 + * `sha2salt`, a Uint8Array of length 64 + * `out`, a Uint8Array of length 32, will be filled with output + +## License + +This source form is a 1:1 port from the OpenBSD `blowfish.c` and `bcrypt_pbkdf.c`. +As a result, it retains the original copyright and license. The two files are +under slightly different (but compatible) licenses, and are here combined in +one file. For each of the full license texts see `LICENSE`. diff --git a/src/node_modules/bcrypt-pbkdf/index.js b/src/node_modules/bcrypt-pbkdf/index.js new file mode 100644 index 0000000..b1b5ad4 --- /dev/null +++ b/src/node_modules/bcrypt-pbkdf/index.js @@ -0,0 +1,556 @@ +'use strict'; + +var crypto_hash_sha512 = require('tweetnacl').lowlevel.crypto_hash; + +/* + * This file is a 1:1 port from the OpenBSD blowfish.c and bcrypt_pbkdf.c. As a + * result, it retains the original copyright and license. The two files are + * under slightly different (but compatible) licenses, and are here combined in + * one file. + * + * Credit for the actual porting work goes to: + * Devi Mandiri + */ + +/* + * The Blowfish portions are under the following license: + * + * Blowfish block cipher for OpenBSD + * Copyright 1997 Niels Provos + * All rights reserved. + * + * Implementation advice by David Mazieres . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * The bcrypt_pbkdf portions are under the following license: + * + * Copyright (c) 2013 Ted Unangst + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Performance improvements (Javascript-specific): + * + * Copyright 2016, Joyent Inc + * Author: Alex Wilson + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +// Ported from OpenBSD bcrypt_pbkdf.c v1.9 + +var BLF_J = 0; + +var Blowfish = function() { + this.S = [ + new Uint32Array([ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, + 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, + 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, + 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, + 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, + 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, + 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, + 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, + 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, + 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, + 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, + 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, + 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, + 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, + 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, + 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, + 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, + 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, + 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, + 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, + 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, + 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, + 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, + 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, + 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, + 0x323db5fa, 
0xfd238760, 0x53317b48, 0x3e00df82, + 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, + 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, + 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, + 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, + 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, + 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, + 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, + 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, + 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, + 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, + 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, + 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, + 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, + 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, + 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, + 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, + 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a]), + new Uint32Array([ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, + 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, + 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, + 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, + 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, + 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, + 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, + 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, + 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 
0x22f54701, + 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, + 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, + 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, + 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, + 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, + 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, + 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, + 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, + 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, + 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, + 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, + 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, + 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, + 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, + 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, + 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, + 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, + 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, + 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, + 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, + 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, + 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, + 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, + 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, + 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, + 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, + 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 
0xc1c7b6a3, + 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, + 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, + 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, + 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, + 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, + 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, + 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7]), + new Uint32Array([ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, + 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, + 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, + 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, + 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, + 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, + 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, + 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, + 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, + 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, + 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, + 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, + 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, + 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, + 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, + 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, + 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, + 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, + 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, + 0x0a476341, 
0x992eff74, 0x3a6f6eab, 0xf4f8fd37, + 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, + 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, + 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, + 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, + 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, + 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, + 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, + 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, + 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, + 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, + 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, + 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, + 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, + 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, + 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, + 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, + 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, + 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, + 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, + 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, + 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, + 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, + 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0]), + new Uint32Array([ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, + 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, + 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 
0x6a366eb4, + 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, + 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, + 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, + 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, + 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, + 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, + 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, + 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, + 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, + 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, + 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, + 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, + 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, + 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, + 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, + 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, + 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, + 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, + 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, + 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, + 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, + 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, + 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, + 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, + 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, + 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, + 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 
0x9b941525, + 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, + 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, + 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, + 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, + 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, + 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, + 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, + 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, + 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, + 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, + 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, + 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, + 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6]) + ]; + this.P = new Uint32Array([ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, + 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, + 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, + 0x9216d5d9, 0x8979fb1b]); +}; + +function F(S, x8, i) { + return (((S[0][x8[i+3]] + + S[1][x8[i+2]]) ^ + S[2][x8[i+1]]) + + S[3][x8[i]]); +}; + +Blowfish.prototype.encipher = function(x, x8) { + if (x8 === undefined) { + x8 = new Uint8Array(x.buffer); + if (x.byteOffset !== 0) + x8 = x8.subarray(x.byteOffset); + } + x[0] ^= this.P[0]; + for (var i = 1; i < 16; i += 2) { + x[1] ^= F(this.S, x8, 0) ^ this.P[i]; + x[0] ^= F(this.S, x8, 4) ^ this.P[i+1]; + } + var t = x[0]; + x[0] = x[1] ^ this.P[17]; + x[1] = t; +}; + +Blowfish.prototype.decipher = function(x) { + var x8 = new Uint8Array(x.buffer); + if (x.byteOffset !== 0) + x8 = x8.subarray(x.byteOffset); + x[0] ^= this.P[17]; + for (var i = 16; i > 0; i -= 2) { + x[1] ^= F(this.S, x8, 0) ^ 
this.P[i]; + x[0] ^= F(this.S, x8, 4) ^ this.P[i-1]; + } + var t = x[0]; + x[0] = x[1] ^ this.P[0]; + x[1] = t; +}; + +function stream2word(data, databytes){ + var i, temp = 0; + for (i = 0; i < 4; i++, BLF_J++) { + if (BLF_J >= databytes) BLF_J = 0; + temp = (temp << 8) | data[BLF_J]; + } + return temp; +}; + +Blowfish.prototype.expand0state = function(key, keybytes) { + var d = new Uint32Array(2), i, k; + var d8 = new Uint8Array(d.buffer); + + for (i = 0, BLF_J = 0; i < 18; i++) { + this.P[i] ^= stream2word(key, keybytes); + } + BLF_J = 0; + + for (i = 0; i < 18; i += 2) { + this.encipher(d, d8); + this.P[i] = d[0]; + this.P[i+1] = d[1]; + } + + for (i = 0; i < 4; i++) { + for (k = 0; k < 256; k += 2) { + this.encipher(d, d8); + this.S[i][k] = d[0]; + this.S[i][k+1] = d[1]; + } + } +}; + +Blowfish.prototype.expandstate = function(data, databytes, key, keybytes) { + var d = new Uint32Array(2), i, k; + + for (i = 0, BLF_J = 0; i < 18; i++) { + this.P[i] ^= stream2word(key, keybytes); + } + + for (i = 0, BLF_J = 0; i < 18; i += 2) { + d[0] ^= stream2word(data, databytes); + d[1] ^= stream2word(data, databytes); + this.encipher(d); + this.P[i] = d[0]; + this.P[i+1] = d[1]; + } + + for (i = 0; i < 4; i++) { + for (k = 0; k < 256; k += 2) { + d[0] ^= stream2word(data, databytes); + d[1] ^= stream2word(data, databytes); + this.encipher(d); + this.S[i][k] = d[0]; + this.S[i][k+1] = d[1]; + } + } + BLF_J = 0; +}; + +Blowfish.prototype.enc = function(data, blocks) { + for (var i = 0; i < blocks; i++) { + this.encipher(data.subarray(i*2)); + } +}; + +Blowfish.prototype.dec = function(data, blocks) { + for (var i = 0; i < blocks; i++) { + this.decipher(data.subarray(i*2)); + } +}; + +var BCRYPT_BLOCKS = 8, + BCRYPT_HASHSIZE = 32; + +function bcrypt_hash(sha2pass, sha2salt, out) { + var state = new Blowfish(), + cdata = new Uint32Array(BCRYPT_BLOCKS), i, + ciphertext = new Uint8Array([79,120,121,99,104,114,111,109,97,116,105, + 
99,66,108,111,119,102,105,115,104,83,119,97,116,68,121,110,97,109, + 105,116,101]); //"OxychromaticBlowfishSwatDynamite" + + state.expandstate(sha2salt, 64, sha2pass, 64); + for (i = 0; i < 64; i++) { + state.expand0state(sha2salt, 64); + state.expand0state(sha2pass, 64); + } + + for (i = 0; i < BCRYPT_BLOCKS; i++) + cdata[i] = stream2word(ciphertext, ciphertext.byteLength); + for (i = 0; i < 64; i++) + state.enc(cdata, cdata.byteLength / 8); + + for (i = 0; i < BCRYPT_BLOCKS; i++) { + out[4*i+3] = cdata[i] >>> 24; + out[4*i+2] = cdata[i] >>> 16; + out[4*i+1] = cdata[i] >>> 8; + out[4*i+0] = cdata[i]; + } +}; + +function bcrypt_pbkdf(pass, passlen, salt, saltlen, key, keylen, rounds) { + var sha2pass = new Uint8Array(64), + sha2salt = new Uint8Array(64), + out = new Uint8Array(BCRYPT_HASHSIZE), + tmpout = new Uint8Array(BCRYPT_HASHSIZE), + countsalt = new Uint8Array(saltlen+4), + i, j, amt, stride, dest, count, + origkeylen = keylen; + + if (rounds < 1) + return -1; + if (passlen === 0 || saltlen === 0 || keylen === 0 || + keylen > (out.byteLength * out.byteLength) || saltlen > (1<<20)) + return -1; + + stride = Math.floor((keylen + out.byteLength - 1) / out.byteLength); + amt = Math.floor((keylen + stride - 1) / stride); + + for (i = 0; i < saltlen; i++) + countsalt[i] = salt[i]; + + crypto_hash_sha512(sha2pass, pass, passlen); + + for (count = 1; keylen > 0; count++) { + countsalt[saltlen+0] = count >>> 24; + countsalt[saltlen+1] = count >>> 16; + countsalt[saltlen+2] = count >>> 8; + countsalt[saltlen+3] = count; + + crypto_hash_sha512(sha2salt, countsalt, saltlen + 4); + bcrypt_hash(sha2pass, sha2salt, tmpout); + for (i = out.byteLength; i--;) + out[i] = tmpout[i]; + + for (i = 1; i < rounds; i++) { + crypto_hash_sha512(sha2salt, tmpout, tmpout.byteLength); + bcrypt_hash(sha2pass, sha2salt, tmpout); + for (j = 0; j < out.byteLength; j++) + out[j] ^= tmpout[j]; + } + + amt = Math.min(amt, keylen); + for (i = 0; i < amt; i++) { + dest = i * stride + (count - 1); 
+ if (dest >= origkeylen) + break; + key[dest] = out[i]; + } + keylen -= i; + } + + return 0; +}; + +module.exports = { + BLOCKS: BCRYPT_BLOCKS, + HASHSIZE: BCRYPT_HASHSIZE, + hash: bcrypt_hash, + pbkdf: bcrypt_pbkdf +}; diff --git a/src/node_modules/bcrypt-pbkdf/package.json b/src/node_modules/bcrypt-pbkdf/package.json new file mode 100644 index 0000000..f9d5d41 --- /dev/null +++ b/src/node_modules/bcrypt-pbkdf/package.json @@ -0,0 +1,47 @@ +{ + "_args": [ + [ + "bcrypt-pbkdf@1.0.2", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "bcrypt-pbkdf@1.0.2", + "_id": "bcrypt-pbkdf@1.0.2", + "_inBundle": false, + "_integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "_location": "/bcrypt-pbkdf", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "bcrypt-pbkdf@1.0.2", + "name": "bcrypt-pbkdf", + "escapedName": "bcrypt-pbkdf", + "rawSpec": "1.0.2", + "saveSpec": null, + "fetchSpec": "1.0.2" + }, + "_requiredBy": [ + "/sshpk" + ], + "_resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "_spec": "1.0.2", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "bugs": { + "url": "https://github.com/joyent/node-bcrypt-pbkdf/issues" + }, + "dependencies": { + "tweetnacl": "^0.14.3" + }, + "description": "Port of the OpenBSD bcrypt_pbkdf function to pure JS", + "devDependencies": {}, + "homepage": "https://github.com/joyent/node-bcrypt-pbkdf#readme", + "license": "BSD-3-Clause", + "main": "index.js", + "name": "bcrypt-pbkdf", + "repository": { + "type": "git", + "url": "git://github.com/joyent/node-bcrypt-pbkdf.git" + }, + "version": "1.0.2" +} diff --git a/src/node_modules/browserify-mime/.npmignore b/src/node_modules/browserify-mime/.npmignore new file mode 100644 index 0000000..e2385fa --- /dev/null +++ b/src/node_modules/browserify-mime/.npmignore @@ -0,0 +1,3 @@ +types +generate.js 
+main.js diff --git a/src/node_modules/browserify-mime/LICENSE b/src/node_modules/browserify-mime/LICENSE new file mode 100644 index 0000000..451fc45 --- /dev/null +++ b/src/node_modules/browserify-mime/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2010 Benjamin Thomas, Robert Kieffer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/src/node_modules/browserify-mime/README.md b/src/node_modules/browserify-mime/README.md new file mode 100644 index 0000000..b90552a --- /dev/null +++ b/src/node_modules/browserify-mime/README.md @@ -0,0 +1,63 @@ +# mime + +Comprehensive MIME type mapping API. Includes all 600+ types and 800+ extensions defined by the Apache project, plus additional types submitted by the node.js community. + +## Install + +Install with [npm](http://github.com/isaacs/npm): + + npm install mime + +## API - Queries + +### mime.lookup(path) +Get the mime type associated with a file. 
Performs a case-insensitive lookup using the extension in `path` (the substring after the last '/' or '.'). E.g. + + var mime = require('mime'); + + mime.lookup('/path/to/file.txt'); // => 'text/plain' + mime.lookup('file.txt'); // => 'text/plain' + mime.lookup('.TXT'); // => 'text/plain' + mime.lookup('htm'); // => 'text/html' + +### mime.extension(type) +Get the default extension for `type` + + mime.extension('text/html'); // => 'html' + mime.extension('application/octet-stream'); // => 'bin' + +### mime.charsets.lookup() + +Map mime-type to charset + + mime.charsets.lookup('text/plain'); // => 'UTF-8' + +(The logic for charset lookups is pretty rudimentary. Feel free to suggest improvements.) + +## API - Defining Custom Types + +The following APIs allow you to add your own type mappings within your project. If you feel a type should be included as part of node-mime, see [requesting new types](https://github.com/broofa/node-mime/wiki/Requesting-New-Types). + +### mime.define() + +Add custom mime/extension mappings + + mime.define({ + 'text/x-some-format': ['x-sf', 'x-sft', 'x-sfml'], + 'application/x-my-type': ['x-mt', 'x-mtt'], + // etc ... + }); + + mime.lookup('x-sft'); // => 'text/x-some-format' + +The first entry in the extensions array is returned by `mime.extension()`. E.g. + + mime.extension('text/x-some-format'); // => 'x-sf' + +### mime.load(filepath) + +Load mappings from an Apache ".types" format file + + mime.load('./my_project.types'); + +The .types file format is simple - See the `types` dir for examples. 
diff --git a/src/node_modules/browserify-mime/browserify-mime.js b/src/node_modules/browserify-mime/browserify-mime.js new file mode 100644 index 0000000..d24bdbe --- /dev/null +++ b/src/node_modules/browserify-mime/browserify-mime.js @@ -0,0 +1,1813 @@ +//this file was generated +"use strict" +var mime = module.exports = { + lookup: function (path, fallback) { + var ext = path.replace(/.*[\.\/]/, '').toLowerCase(); + + return this.types[ext] || fallback || this.default_type; +} + , default_type: "application/octet-stream" + , types: { + "123": "application/vnd.lotus-1-2-3", + "ez": "application/andrew-inset", + "aw": "application/applixware", + "atom": "application/atom+xml", + "atomcat": "application/atomcat+xml", + "atomsvc": "application/atomsvc+xml", + "ccxml": "application/ccxml+xml", + "cdmia": "application/cdmi-capability", + "cdmic": "application/cdmi-container", + "cdmid": "application/cdmi-domain", + "cdmio": "application/cdmi-object", + "cdmiq": "application/cdmi-queue", + "cu": "application/cu-seeme", + "davmount": "application/davmount+xml", + "dbk": "application/docbook+xml", + "dssc": "application/dssc+der", + "xdssc": "application/dssc+xml", + "ecma": "application/ecmascript", + "emma": "application/emma+xml", + "epub": "application/epub+zip", + "exi": "application/exi", + "pfr": "application/font-tdpfr", + "gml": "application/gml+xml", + "gpx": "application/gpx+xml", + "gxf": "application/gxf", + "stk": "application/hyperstudio", + "ink": "application/inkml+xml", + "inkml": "application/inkml+xml", + "ipfix": "application/ipfix", + "jar": "application/java-archive", + "ser": "application/java-serialized-object", + "class": "application/java-vm", + "js": "application/javascript", + "json": "application/json", + "jsonml": "application/jsonml+json", + "lostxml": "application/lost+xml", + "hqx": "application/mac-binhex40", + "cpt": "application/mac-compactpro", + "mads": "application/mads+xml", + "mrc": "application/marc", + "mrcx": 
"application/marcxml+xml", + "ma": "application/mathematica", + "nb": "application/mathematica", + "mb": "application/mathematica", + "mathml": "application/mathml+xml", + "mbox": "application/mbox", + "mscml": "application/mediaservercontrol+xml", + "metalink": "application/metalink+xml", + "meta4": "application/metalink4+xml", + "mets": "application/mets+xml", + "mods": "application/mods+xml", + "m21": "application/mp21", + "mp21": "application/mp21", + "mp4s": "application/mp4", + "doc": "application/msword", + "dot": "application/msword", + "mxf": "application/mxf", + "bin": "application/octet-stream", + "dms": "application/octet-stream", + "lrf": "application/octet-stream", + "mar": "application/octet-stream", + "so": "application/octet-stream", + "dist": "application/octet-stream", + "distz": "application/octet-stream", + "pkg": "application/octet-stream", + "bpk": "application/octet-stream", + "dump": "application/octet-stream", + "elc": "application/octet-stream", + "deploy": "application/octet-stream", + "oda": "application/oda", + "opf": "application/oebps-package+xml", + "ogx": "application/ogg", + "omdoc": "application/omdoc+xml", + "onetoc": "application/onenote", + "onetoc2": "application/onenote", + "onetmp": "application/onenote", + "onepkg": "application/onenote", + "oxps": "application/oxps", + "xer": "application/patch-ops-error+xml", + "pdf": "application/pdf", + "pgp": "application/pgp-encrypted", + "asc": "application/pgp-signature", + "sig": "application/pgp-signature", + "prf": "application/pics-rules", + "p10": "application/pkcs10", + "p7m": "application/pkcs7-mime", + "p7c": "application/pkcs7-mime", + "p7s": "application/pkcs7-signature", + "p8": "application/pkcs8", + "ac": "application/pkix-attr-cert", + "cer": "application/pkix-cert", + "crl": "application/pkix-crl", + "pkipath": "application/pkix-pkipath", + "pki": "application/pkixcmp", + "pls": "application/pls+xml", + "ai": "application/postscript", + "eps": 
"application/postscript", + "ps": "application/postscript", + "cww": "application/prs.cww", + "pskcxml": "application/pskc+xml", + "rdf": "application/rdf+xml", + "rif": "application/reginfo+xml", + "rnc": "application/relax-ng-compact-syntax", + "rl": "application/resource-lists+xml", + "rld": "application/resource-lists-diff+xml", + "rs": "application/rls-services+xml", + "gbr": "application/rpki-ghostbusters", + "mft": "application/rpki-manifest", + "roa": "application/rpki-roa", + "rsd": "application/rsd+xml", + "rss": "application/rss+xml", + "rtf": "application/rtf", + "sbml": "application/sbml+xml", + "scq": "application/scvp-cv-request", + "scs": "application/scvp-cv-response", + "spq": "application/scvp-vp-request", + "spp": "application/scvp-vp-response", + "sdp": "application/sdp", + "setpay": "application/set-payment-initiation", + "setreg": "application/set-registration-initiation", + "shf": "application/shf+xml", + "smi": "application/smil+xml", + "smil": "application/smil+xml", + "rq": "application/sparql-query", + "srx": "application/sparql-results+xml", + "gram": "application/srgs", + "grxml": "application/srgs+xml", + "sru": "application/sru+xml", + "ssdl": "application/ssdl+xml", + "ssml": "application/ssml+xml", + "tei": "application/tei+xml", + "teicorpus": "application/tei+xml", + "tfi": "application/thraud+xml", + "tsd": "application/timestamped-data", + "plb": "application/vnd.3gpp.pic-bw-large", + "psb": "application/vnd.3gpp.pic-bw-small", + "pvb": "application/vnd.3gpp.pic-bw-var", + "tcap": "application/vnd.3gpp2.tcap", + "pwn": "application/vnd.3m.post-it-notes", + "aso": "application/vnd.accpac.simply.aso", + "imp": "application/vnd.accpac.simply.imp", + "acu": "application/vnd.acucobol", + "atc": "application/vnd.acucorp", + "acutc": "application/vnd.acucorp", + "air": "application/vnd.adobe.air-application-installer-package+zip", + "fcdt": "application/vnd.adobe.formscentral.fcdt", + "fxp": "application/vnd.adobe.fxp", + "fxpl": 
"application/vnd.adobe.fxp", + "xdp": "application/vnd.adobe.xdp+xml", + "xfdf": "application/vnd.adobe.xfdf", + "ahead": "application/vnd.ahead.space", + "azf": "application/vnd.airzip.filesecure.azf", + "azs": "application/vnd.airzip.filesecure.azs", + "azw": "application/vnd.amazon.ebook", + "acc": "application/vnd.americandynamics.acc", + "ami": "application/vnd.amiga.ami", + "apk": "application/vnd.android.package-archive", + "cii": "application/vnd.anser-web-certificate-issue-initiation", + "fti": "application/vnd.anser-web-funds-transfer-initiation", + "atx": "application/vnd.antix.game-component", + "mpkg": "application/vnd.apple.installer+xml", + "m3u8": "application/vnd.apple.mpegurl", + "swi": "application/vnd.aristanetworks.swi", + "iota": "application/vnd.astraea-software.iota", + "aep": "application/vnd.audiograph", + "mpm": "application/vnd.blueice.multipass", + "bmi": "application/vnd.bmi", + "rep": "application/vnd.businessobjects", + "cdxml": "application/vnd.chemdraw+xml", + "mmd": "application/vnd.chipnuts.karaoke-mmd", + "cdy": "application/vnd.cinderella", + "cla": "application/vnd.claymore", + "rp9": "application/vnd.cloanto.rp9", + "c4g": "application/vnd.clonk.c4group", + "c4d": "application/vnd.clonk.c4group", + "c4f": "application/vnd.clonk.c4group", + "c4p": "application/vnd.clonk.c4group", + "c4u": "application/vnd.clonk.c4group", + "c11amc": "application/vnd.cluetrust.cartomobile-config", + "c11amz": "application/vnd.cluetrust.cartomobile-config-pkg", + "csp": "application/vnd.commonspace", + "cdbcmsg": "application/vnd.contact.cmsg", + "cmc": "application/vnd.cosmocaller", + "clkx": "application/vnd.crick.clicker", + "clkk": "application/vnd.crick.clicker.keyboard", + "clkp": "application/vnd.crick.clicker.palette", + "clkt": "application/vnd.crick.clicker.template", + "clkw": "application/vnd.crick.clicker.wordbank", + "wbs": "application/vnd.criticaltools.wbs+xml", + "pml": "application/vnd.ctc-posml", + "ppd": 
"application/vnd.cups-ppd", + "car": "application/vnd.curl.car", + "pcurl": "application/vnd.curl.pcurl", + "dart": "application/vnd.dart", + "rdz": "application/vnd.data-vision.rdz", + "uvf": "application/vnd.dece.data", + "uvvf": "application/vnd.dece.data", + "uvd": "application/vnd.dece.data", + "uvvd": "application/vnd.dece.data", + "uvt": "application/vnd.dece.ttml+xml", + "uvvt": "application/vnd.dece.ttml+xml", + "uvx": "application/vnd.dece.unspecified", + "uvvx": "application/vnd.dece.unspecified", + "uvz": "application/vnd.dece.zip", + "uvvz": "application/vnd.dece.zip", + "fe_launch": "application/vnd.denovo.fcselayout-link", + "dna": "application/vnd.dna", + "mlp": "application/vnd.dolby.mlp", + "dpg": "application/vnd.dpgraph", + "dfac": "application/vnd.dreamfactory", + "kpxx": "application/vnd.ds-keypoint", + "ait": "application/vnd.dvb.ait", + "svc": "application/vnd.dvb.service", + "geo": "application/vnd.dynageo", + "mag": "application/vnd.ecowin.chart", + "nml": "application/vnd.enliven", + "esf": "application/vnd.epson.esf", + "msf": "application/vnd.epson.msf", + "qam": "application/vnd.epson.quickanime", + "slt": "application/vnd.epson.salt", + "ssf": "application/vnd.epson.ssf", + "es3": "application/vnd.eszigno3+xml", + "et3": "application/vnd.eszigno3+xml", + "ez2": "application/vnd.ezpix-album", + "ez3": "application/vnd.ezpix-package", + "fdf": "application/vnd.fdf", + "mseed": "application/vnd.fdsn.mseed", + "seed": "application/vnd.fdsn.seed", + "dataless": "application/vnd.fdsn.seed", + "gph": "application/vnd.flographit", + "ftc": "application/vnd.fluxtime.clip", + "fm": "application/vnd.framemaker", + "frame": "application/vnd.framemaker", + "maker": "application/vnd.framemaker", + "book": "application/vnd.framemaker", + "fnc": "application/vnd.frogans.fnc", + "ltf": "application/vnd.frogans.ltf", + "fsc": "application/vnd.fsc.weblaunch", + "oas": "application/vnd.fujitsu.oasys", + "oa2": "application/vnd.fujitsu.oasys2", + "oa3": 
"application/vnd.fujitsu.oasys3", + "fg5": "application/vnd.fujitsu.oasysgp", + "bh2": "application/vnd.fujitsu.oasysprs", + "ddd": "application/vnd.fujixerox.ddd", + "xdw": "application/vnd.fujixerox.docuworks", + "xbd": "application/vnd.fujixerox.docuworks.binder", + "fzs": "application/vnd.fuzzysheet", + "txd": "application/vnd.genomatix.tuxedo", + "ggb": "application/vnd.geogebra.file", + "ggt": "application/vnd.geogebra.tool", + "gex": "application/vnd.geometry-explorer", + "gre": "application/vnd.geometry-explorer", + "gxt": "application/vnd.geonext", + "g2w": "application/vnd.geoplan", + "g3w": "application/vnd.geospace", + "gmx": "application/vnd.gmx", + "kml": "application/vnd.google-earth.kml+xml", + "kmz": "application/vnd.google-earth.kmz", + "gqf": "application/vnd.grafeq", + "gqs": "application/vnd.grafeq", + "gac": "application/vnd.groove-account", + "ghf": "application/vnd.groove-help", + "gim": "application/vnd.groove-identity-message", + "grv": "application/vnd.groove-injector", + "gtm": "application/vnd.groove-tool-message", + "tpl": "application/vnd.groove-tool-template", + "vcg": "application/vnd.groove-vcard", + "hal": "application/vnd.hal+xml", + "zmm": "application/vnd.handheld-entertainment+xml", + "hbci": "application/vnd.hbci", + "les": "application/vnd.hhe.lesson-player", + "hpgl": "application/vnd.hp-hpgl", + "hpid": "application/vnd.hp-hpid", + "hps": "application/vnd.hp-hps", + "jlt": "application/vnd.hp-jlyt", + "pcl": "application/vnd.hp-pcl", + "pclxl": "application/vnd.hp-pclxl", + "sfd-hdstx": "application/vnd.hydrostatix.sof-data", + "mpy": "application/vnd.ibm.minipay", + "afp": "application/vnd.ibm.modcap", + "listafp": "application/vnd.ibm.modcap", + "list3820": "application/vnd.ibm.modcap", + "irm": "application/vnd.ibm.rights-management", + "sc": "application/vnd.ibm.secure-container", + "icc": "application/vnd.iccprofile", + "icm": "application/vnd.iccprofile", + "igl": "application/vnd.igloader", + "ivp": 
"application/vnd.immervision-ivp", + "ivu": "application/vnd.immervision-ivu", + "igm": "application/vnd.insors.igm", + "xpw": "application/vnd.intercon.formnet", + "xpx": "application/vnd.intercon.formnet", + "i2g": "application/vnd.intergeo", + "qbo": "application/vnd.intu.qbo", + "qfx": "application/vnd.intu.qfx", + "rcprofile": "application/vnd.ipunplugged.rcprofile", + "irp": "application/vnd.irepository.package+xml", + "xpr": "application/vnd.is-xpr", + "fcs": "application/vnd.isac.fcs", + "jam": "application/vnd.jam", + "rms": "application/vnd.jcp.javame.midlet-rms", + "jisp": "application/vnd.jisp", + "joda": "application/vnd.joost.joda-archive", + "ktz": "application/vnd.kahootz", + "ktr": "application/vnd.kahootz", + "karbon": "application/vnd.kde.karbon", + "chrt": "application/vnd.kde.kchart", + "kfo": "application/vnd.kde.kformula", + "flw": "application/vnd.kde.kivio", + "kon": "application/vnd.kde.kontour", + "kpr": "application/vnd.kde.kpresenter", + "kpt": "application/vnd.kde.kpresenter", + "ksp": "application/vnd.kde.kspread", + "kwd": "application/vnd.kde.kword", + "kwt": "application/vnd.kde.kword", + "htke": "application/vnd.kenameaapp", + "kia": "application/vnd.kidspiration", + "kne": "application/vnd.kinar", + "knp": "application/vnd.kinar", + "skp": "application/vnd.koan", + "skd": "application/vnd.koan", + "skt": "application/vnd.koan", + "skm": "application/vnd.koan", + "sse": "application/vnd.kodak-descriptor", + "lasxml": "application/vnd.las.las+xml", + "lbd": "application/vnd.llamagraphics.life-balance.desktop", + "lbe": "application/vnd.llamagraphics.life-balance.exchange+xml", + "apr": "application/vnd.lotus-approach", + "pre": "application/vnd.lotus-freelance", + "nsf": "application/vnd.lotus-notes", + "org": "application/vnd.lotus-organizer", + "scm": "application/vnd.lotus-screencam", + "lwp": "application/vnd.lotus-wordpro", + "portpkg": "application/vnd.macports.portpkg", + "mcd": "application/vnd.mcd", + "mc1": 
"application/vnd.medcalcdata", + "cdkey": "application/vnd.mediastation.cdkey", + "mwf": "application/vnd.mfer", + "mfm": "application/vnd.mfmp", + "flo": "application/vnd.micrografx.flo", + "igx": "application/vnd.micrografx.igx", + "mif": "application/vnd.mif", + "daf": "application/vnd.mobius.daf", + "dis": "application/vnd.mobius.dis", + "mbk": "application/vnd.mobius.mbk", + "mqy": "application/vnd.mobius.mqy", + "msl": "application/vnd.mobius.msl", + "plc": "application/vnd.mobius.plc", + "txf": "application/vnd.mobius.txf", + "mpn": "application/vnd.mophun.application", + "mpc": "application/vnd.mophun.certificate", + "xul": "application/vnd.mozilla.xul+xml", + "cil": "application/vnd.ms-artgalry", + "cab": "application/vnd.ms-cab-compressed", + "xls": "application/vnd.ms-excel", + "xlm": "application/vnd.ms-excel", + "xla": "application/vnd.ms-excel", + "xlc": "application/vnd.ms-excel", + "xlt": "application/vnd.ms-excel", + "xlw": "application/vnd.ms-excel", + "xlam": "application/vnd.ms-excel.addin.macroenabled.12", + "xlsb": "application/vnd.ms-excel.sheet.binary.macroenabled.12", + "xlsm": "application/vnd.ms-excel.sheet.macroenabled.12", + "xltm": "application/vnd.ms-excel.template.macroenabled.12", + "eot": "application/vnd.ms-fontobject", + "chm": "application/vnd.ms-htmlhelp", + "ims": "application/vnd.ms-ims", + "lrm": "application/vnd.ms-lrm", + "thmx": "application/vnd.ms-officetheme", + "cat": "application/vnd.ms-pki.seccat", + "stl": "application/vnd.ms-pki.stl", + "ppt": "application/vnd.ms-powerpoint", + "pps": "application/vnd.ms-powerpoint", + "pot": "application/vnd.ms-powerpoint", + "ppam": "application/vnd.ms-powerpoint.addin.macroenabled.12", + "pptm": "application/vnd.ms-powerpoint.presentation.macroenabled.12", + "sldm": "application/vnd.ms-powerpoint.slide.macroenabled.12", + "ppsm": "application/vnd.ms-powerpoint.slideshow.macroenabled.12", + "potm": "application/vnd.ms-powerpoint.template.macroenabled.12", + "mpp": 
"application/vnd.ms-project", + "mpt": "application/vnd.ms-project", + "docm": "application/vnd.ms-word.document.macroenabled.12", + "dotm": "application/vnd.ms-word.template.macroenabled.12", + "wps": "application/vnd.ms-works", + "wks": "application/vnd.ms-works", + "wcm": "application/vnd.ms-works", + "wdb": "application/vnd.ms-works", + "wpl": "application/vnd.ms-wpl", + "xps": "application/vnd.ms-xpsdocument", + "mseq": "application/vnd.mseq", + "mus": "application/vnd.musician", + "msty": "application/vnd.muvee.style", + "taglet": "application/vnd.mynfc", + "nlu": "application/vnd.neurolanguage.nlu", + "ntf": "application/vnd.nitf", + "nitf": "application/vnd.nitf", + "nnd": "application/vnd.noblenet-directory", + "nns": "application/vnd.noblenet-sealer", + "nnw": "application/vnd.noblenet-web", + "ngdat": "application/vnd.nokia.n-gage.data", + "n-gage": "application/vnd.nokia.n-gage.symbian.install", + "rpst": "application/vnd.nokia.radio-preset", + "rpss": "application/vnd.nokia.radio-presets", + "edm": "application/vnd.novadigm.edm", + "edx": "application/vnd.novadigm.edx", + "ext": "application/vnd.novadigm.ext", + "odc": "application/vnd.oasis.opendocument.chart", + "otc": "application/vnd.oasis.opendocument.chart-template", + "odb": "application/vnd.oasis.opendocument.database", + "odf": "application/vnd.oasis.opendocument.formula", + "odft": "application/vnd.oasis.opendocument.formula-template", + "odg": "application/vnd.oasis.opendocument.graphics", + "otg": "application/vnd.oasis.opendocument.graphics-template", + "odi": "application/vnd.oasis.opendocument.image", + "oti": "application/vnd.oasis.opendocument.image-template", + "odp": "application/vnd.oasis.opendocument.presentation", + "otp": "application/vnd.oasis.opendocument.presentation-template", + "ods": "application/vnd.oasis.opendocument.spreadsheet", + "ots": "application/vnd.oasis.opendocument.spreadsheet-template", + "odt": "application/vnd.oasis.opendocument.text", + "odm": 
"application/vnd.oasis.opendocument.text-master", + "ott": "application/vnd.oasis.opendocument.text-template", + "oth": "application/vnd.oasis.opendocument.text-web", + "xo": "application/vnd.olpc-sugar", + "dd2": "application/vnd.oma.dd2+xml", + "oxt": "application/vnd.openofficeorg.extension", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + "ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + "potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + "mgp": "application/vnd.osgeo.mapguide.package", + "dp": "application/vnd.osgi.dp", + "esa": "application/vnd.osgi.subsystem", + "pdb": "application/vnd.palm", + "pqa": "application/vnd.palm", + "oprc": "application/vnd.palm", + "paw": "application/vnd.pawaafile", + "str": "application/vnd.pg.format", + "ei6": "application/vnd.pg.osasli", + "efif": "application/vnd.picsel", + "wg": "application/vnd.pmi.widget", + "plf": "application/vnd.pocketlearn", + "pbd": "application/vnd.powerbuilder6", + "box": "application/vnd.previewsystems.box", + "mgz": "application/vnd.proteus.magazine", + "qps": "application/vnd.publishare-delta-tree", + "ptid": "application/vnd.pvi.ptid1", + "qxd": "application/vnd.quark.quarkxpress", + "qxt": "application/vnd.quark.quarkxpress", + "qwd": "application/vnd.quark.quarkxpress", + "qwt": "application/vnd.quark.quarkxpress", + "qxl": "application/vnd.quark.quarkxpress", + "qxb": "application/vnd.quark.quarkxpress", + "bed": "application/vnd.realvnc.bed", + "mxl": 
"application/vnd.recordare.musicxml", + "musicxml": "application/vnd.recordare.musicxml+xml", + "cryptonote": "application/vnd.rig.cryptonote", + "cod": "application/vnd.rim.cod", + "rm": "application/vnd.rn-realmedia", + "rmvb": "application/vnd.rn-realmedia-vbr", + "link66": "application/vnd.route66.link66+xml", + "st": "application/vnd.sailingtracker.track", + "see": "application/vnd.seemail", + "sema": "application/vnd.sema", + "semd": "application/vnd.semd", + "semf": "application/vnd.semf", + "ifm": "application/vnd.shana.informed.formdata", + "itp": "application/vnd.shana.informed.formtemplate", + "iif": "application/vnd.shana.informed.interchange", + "ipk": "application/vnd.shana.informed.package", + "twd": "application/vnd.simtech-mindmapper", + "twds": "application/vnd.simtech-mindmapper", + "mmf": "application/vnd.smaf", + "teacher": "application/vnd.smart.teacher", + "sdkm": "application/vnd.solent.sdkm+xml", + "sdkd": "application/vnd.solent.sdkm+xml", + "dxp": "application/vnd.spotfire.dxp", + "sfs": "application/vnd.spotfire.sfs", + "sdc": "application/vnd.stardivision.calc", + "sda": "application/vnd.stardivision.draw", + "sdd": "application/vnd.stardivision.impress", + "smf": "application/vnd.stardivision.math", + "sdw": "application/vnd.stardivision.writer", + "vor": "application/vnd.stardivision.writer", + "sgl": "application/vnd.stardivision.writer-global", + "smzip": "application/vnd.stepmania.package", + "sm": "application/vnd.stepmania.stepchart", + "sxc": "application/vnd.sun.xml.calc", + "stc": "application/vnd.sun.xml.calc.template", + "sxd": "application/vnd.sun.xml.draw", + "std": "application/vnd.sun.xml.draw.template", + "sxi": "application/vnd.sun.xml.impress", + "sti": "application/vnd.sun.xml.impress.template", + "sxm": "application/vnd.sun.xml.math", + "sxw": "application/vnd.sun.xml.writer", + "sxg": "application/vnd.sun.xml.writer.global", + "stw": "application/vnd.sun.xml.writer.template", + "sus": 
"application/vnd.sus-calendar", + "susp": "application/vnd.sus-calendar", + "svd": "application/vnd.svd", + "sis": "application/vnd.symbian.install", + "sisx": "application/vnd.symbian.install", + "xsm": "application/vnd.syncml+xml", + "bdm": "application/vnd.syncml.dm+wbxml", + "xdm": "application/vnd.syncml.dm+xml", + "tao": "application/vnd.tao.intent-module-archive", + "pcap": "application/vnd.tcpdump.pcap", + "cap": "application/vnd.tcpdump.pcap", + "dmp": "application/vnd.tcpdump.pcap", + "tmo": "application/vnd.tmobile-livetv", + "tpt": "application/vnd.trid.tpt", + "mxs": "application/vnd.triscape.mxs", + "tra": "application/vnd.trueapp", + "ufd": "application/vnd.ufdl", + "ufdl": "application/vnd.ufdl", + "utz": "application/vnd.uiq.theme", + "umj": "application/vnd.umajin", + "unityweb": "application/vnd.unity", + "uoml": "application/vnd.uoml+xml", + "vcx": "application/vnd.vcx", + "vsd": "application/vnd.visio", + "vst": "application/vnd.visio", + "vss": "application/vnd.visio", + "vsw": "application/vnd.visio", + "vis": "application/vnd.visionary", + "vsf": "application/vnd.vsf", + "wbxml": "application/vnd.wap.wbxml", + "wmlc": "application/vnd.wap.wmlc", + "wmlsc": "application/vnd.wap.wmlscriptc", + "wtb": "application/vnd.webturbo", + "nbp": "application/vnd.wolfram.player", + "wpd": "application/vnd.wordperfect", + "wqd": "application/vnd.wqd", + "stf": "application/vnd.wt.stf", + "xar": "application/vnd.xara", + "xfdl": "application/vnd.xfdl", + "hvd": "application/vnd.yamaha.hv-dic", + "hvs": "application/vnd.yamaha.hv-script", + "hvp": "application/vnd.yamaha.hv-voice", + "osf": "application/vnd.yamaha.openscoreformat", + "osfpvg": "application/vnd.yamaha.openscoreformat.osfpvg+xml", + "saf": "application/vnd.yamaha.smaf-audio", + "spf": "application/vnd.yamaha.smaf-phrase", + "cmp": "application/vnd.yellowriver-custom-menu", + "zir": "application/vnd.zul", + "zirz": "application/vnd.zul", + "zaz": "application/vnd.zzazz.deck+xml", + "vxml": 
"application/voicexml+xml", + "wgt": "application/widget", + "hlp": "application/winhlp", + "wsdl": "application/wsdl+xml", + "wspolicy": "application/wspolicy+xml", + "7z": "application/x-7z-compressed", + "abw": "application/x-abiword", + "ace": "application/x-ace-compressed", + "dmg": "application/x-apple-diskimage", + "aab": "application/x-authorware-bin", + "x32": "application/x-authorware-bin", + "u32": "application/x-authorware-bin", + "vox": "application/x-authorware-bin", + "aam": "application/x-authorware-map", + "aas": "application/x-authorware-seg", + "bcpio": "application/x-bcpio", + "torrent": "application/x-bittorrent", + "blb": "application/x-blorb", + "blorb": "application/x-blorb", + "bz": "application/x-bzip", + "bz2": "application/x-bzip2", + "boz": "application/x-bzip2", + "cbr": "application/x-cbr", + "cba": "application/x-cbr", + "cbt": "application/x-cbr", + "cbz": "application/x-cbr", + "cb7": "application/x-cbr", + "vcd": "application/x-cdlink", + "cfs": "application/x-cfs-compressed", + "chat": "application/x-chat", + "pgn": "application/x-chess-pgn", + "nsc": "application/x-conference", + "cpio": "application/x-cpio", + "csh": "application/x-csh", + "deb": "application/x-debian-package", + "udeb": "application/x-debian-package", + "dgc": "application/x-dgc-compressed", + "dir": "application/x-director", + "dcr": "application/x-director", + "dxr": "application/x-director", + "cst": "application/x-director", + "cct": "application/x-director", + "cxt": "application/x-director", + "w3d": "application/x-director", + "fgd": "application/x-director", + "swa": "application/x-director", + "wad": "application/x-doom", + "ncx": "application/x-dtbncx+xml", + "dtb": "application/x-dtbook+xml", + "res": "application/x-dtbresource+xml", + "dvi": "application/x-dvi", + "evy": "application/x-envoy", + "eva": "application/x-eva", + "bdf": "application/x-font-bdf", + "gsf": "application/x-font-ghostscript", + "psf": "application/x-font-linux-psf", + "otf": 
"application/x-font-otf", + "pcf": "application/x-font-pcf", + "snf": "application/x-font-snf", + "ttf": "application/x-font-ttf", + "ttc": "application/x-font-ttf", + "pfa": "application/x-font-type1", + "pfb": "application/x-font-type1", + "pfm": "application/x-font-type1", + "afm": "application/x-font-type1", + "woff": "application/x-font-woff", + "arc": "application/x-freearc", + "spl": "application/x-futuresplash", + "gca": "application/x-gca-compressed", + "ulx": "application/x-glulx", + "gnumeric": "application/x-gnumeric", + "gramps": "application/x-gramps-xml", + "gtar": "application/x-gtar", + "hdf": "application/x-hdf", + "install": "application/x-install-instructions", + "iso": "application/x-iso9660-image", + "jnlp": "application/x-java-jnlp-file", + "latex": "application/x-latex", + "lzh": "application/x-lzh-compressed", + "lha": "application/x-lzh-compressed", + "mie": "application/x-mie", + "prc": "application/x-mobipocket-ebook", + "mobi": "application/x-mobipocket-ebook", + "application": "application/x-ms-application", + "lnk": "application/x-ms-shortcut", + "wmd": "application/x-ms-wmd", + "wmz": "application/x-msmetafile", + "xbap": "application/x-ms-xbap", + "mdb": "application/x-msaccess", + "obd": "application/x-msbinder", + "crd": "application/x-mscardfile", + "clp": "application/x-msclip", + "exe": "application/x-msdownload", + "dll": "application/x-msdownload", + "com": "application/x-msdownload", + "bat": "application/x-msdownload", + "msi": "application/x-msdownload", + "mvb": "application/x-msmediaview", + "m13": "application/x-msmediaview", + "m14": "application/x-msmediaview", + "wmf": "application/x-msmetafile", + "emf": "application/x-msmetafile", + "emz": "application/x-msmetafile", + "mny": "application/x-msmoney", + "pub": "application/x-mspublisher", + "scd": "application/x-msschedule", + "trm": "application/x-msterminal", + "wri": "application/x-mswrite", + "nc": "application/x-netcdf", + "cdf": "application/x-netcdf", + 
"nzb": "application/x-nzb", + "p12": "application/x-pkcs12", + "pfx": "application/x-pkcs12", + "p7b": "application/x-pkcs7-certificates", + "spc": "application/x-pkcs7-certificates", + "p7r": "application/x-pkcs7-certreqresp", + "rar": "application/x-rar-compressed", + "ris": "application/x-research-info-systems", + "sh": "application/x-sh", + "shar": "application/x-shar", + "swf": "application/x-shockwave-flash", + "xap": "application/x-silverlight-app", + "sql": "application/x-sql", + "sit": "application/x-stuffit", + "sitx": "application/x-stuffitx", + "srt": "application/x-subrip", + "sv4cpio": "application/x-sv4cpio", + "sv4crc": "application/x-sv4crc", + "t3": "application/x-t3vm-image", + "gam": "application/x-tads", + "tar": "application/x-tar", + "tcl": "application/x-tcl", + "tex": "application/x-tex", + "tfm": "application/x-tex-tfm", + "texinfo": "application/x-texinfo", + "texi": "application/x-texinfo", + "obj": "application/x-tgif", + "ustar": "application/x-ustar", + "src": "application/x-wais-source", + "der": "application/x-x509-ca-cert", + "crt": "application/x-x509-ca-cert", + "fig": "application/x-xfig", + "xlf": "application/x-xliff+xml", + "xpi": "application/x-xpinstall", + "xz": "application/x-xz", + "z1": "application/x-zmachine", + "z2": "application/x-zmachine", + "z3": "application/x-zmachine", + "z4": "application/x-zmachine", + "z5": "application/x-zmachine", + "z6": "application/x-zmachine", + "z7": "application/x-zmachine", + "z8": "application/x-zmachine", + "xaml": "application/xaml+xml", + "xdf": "application/xcap-diff+xml", + "xenc": "application/xenc+xml", + "xhtml": "application/xhtml+xml", + "xht": "application/xhtml+xml", + "xml": "application/xml", + "xsl": "application/xml", + "dtd": "application/xml-dtd", + "xop": "application/xop+xml", + "xpl": "application/xproc+xml", + "xslt": "application/xslt+xml", + "xspf": "application/xspf+xml", + "mxml": "application/xv+xml", + "xhvml": "application/xv+xml", + "xvml": 
"application/xv+xml", + "xvm": "application/xv+xml", + "yang": "application/yang", + "yin": "application/yin+xml", + "zip": "application/zip", + "adp": "audio/adpcm", + "au": "audio/basic", + "snd": "audio/basic", + "mid": "audio/midi", + "midi": "audio/midi", + "kar": "audio/midi", + "rmi": "audio/midi", + "mp4a": "audio/mp4", + "mpga": "audio/mpeg", + "mp2": "audio/mpeg", + "mp2a": "audio/mpeg", + "mp3": "audio/mpeg", + "m2a": "audio/mpeg", + "m3a": "audio/mpeg", + "oga": "audio/ogg", + "ogg": "audio/ogg", + "spx": "audio/ogg", + "s3m": "audio/s3m", + "sil": "audio/silk", + "uva": "audio/vnd.dece.audio", + "uvva": "audio/vnd.dece.audio", + "eol": "audio/vnd.digital-winds", + "dra": "audio/vnd.dra", + "dts": "audio/vnd.dts", + "dtshd": "audio/vnd.dts.hd", + "lvp": "audio/vnd.lucent.voice", + "pya": "audio/vnd.ms-playready.media.pya", + "ecelp4800": "audio/vnd.nuera.ecelp4800", + "ecelp7470": "audio/vnd.nuera.ecelp7470", + "ecelp9600": "audio/vnd.nuera.ecelp9600", + "rip": "audio/vnd.rip", + "weba": "audio/webm", + "aac": "audio/x-aac", + "aif": "audio/x-aiff", + "aiff": "audio/x-aiff", + "aifc": "audio/x-aiff", + "caf": "audio/x-caf", + "flac": "audio/x-flac", + "mka": "audio/x-matroska", + "m3u": "audio/x-mpegurl", + "wax": "audio/x-ms-wax", + "wma": "audio/x-ms-wma", + "ram": "audio/x-pn-realaudio", + "ra": "audio/x-pn-realaudio", + "rmp": "audio/x-pn-realaudio-plugin", + "wav": "audio/x-wav", + "xm": "audio/xm", + "cdx": "chemical/x-cdx", + "cif": "chemical/x-cif", + "cmdf": "chemical/x-cmdf", + "cml": "chemical/x-cml", + "csml": "chemical/x-csml", + "xyz": "chemical/x-xyz", + "bmp": "image/bmp", + "cgm": "image/cgm", + "g3": "image/g3fax", + "gif": "image/gif", + "ief": "image/ief", + "jpeg": "image/jpeg", + "jpg": "image/jpeg", + "jpe": "image/jpeg", + "ktx": "image/ktx", + "png": "image/png", + "btif": "image/prs.btif", + "sgi": "image/sgi", + "svg": "image/svg+xml", + "svgz": "image/svg+xml", + "tiff": "image/tiff", + "tif": "image/tiff", + "psd": 
"image/vnd.adobe.photoshop", + "uvi": "image/vnd.dece.graphic", + "uvvi": "image/vnd.dece.graphic", + "uvg": "image/vnd.dece.graphic", + "uvvg": "image/vnd.dece.graphic", + "sub": "text/vnd.dvb.subtitle", + "djvu": "image/vnd.djvu", + "djv": "image/vnd.djvu", + "dwg": "image/vnd.dwg", + "dxf": "image/vnd.dxf", + "fbs": "image/vnd.fastbidsheet", + "fpx": "image/vnd.fpx", + "fst": "image/vnd.fst", + "mmr": "image/vnd.fujixerox.edmics-mmr", + "rlc": "image/vnd.fujixerox.edmics-rlc", + "mdi": "image/vnd.ms-modi", + "wdp": "image/vnd.ms-photo", + "npx": "image/vnd.net-fpx", + "wbmp": "image/vnd.wap.wbmp", + "xif": "image/vnd.xiff", + "webp": "image/webp", + "3ds": "image/x-3ds", + "ras": "image/x-cmu-raster", + "cmx": "image/x-cmx", + "fh": "image/x-freehand", + "fhc": "image/x-freehand", + "fh4": "image/x-freehand", + "fh5": "image/x-freehand", + "fh7": "image/x-freehand", + "ico": "image/x-icon", + "sid": "image/x-mrsid-image", + "pcx": "image/x-pcx", + "pic": "image/x-pict", + "pct": "image/x-pict", + "pnm": "image/x-portable-anymap", + "pbm": "image/x-portable-bitmap", + "pgm": "image/x-portable-graymap", + "ppm": "image/x-portable-pixmap", + "rgb": "image/x-rgb", + "tga": "image/x-tga", + "xbm": "image/x-xbitmap", + "xpm": "image/x-xpixmap", + "xwd": "image/x-xwindowdump", + "eml": "message/rfc822", + "mime": "message/rfc822", + "igs": "model/iges", + "iges": "model/iges", + "msh": "model/mesh", + "mesh": "model/mesh", + "silo": "model/mesh", + "dae": "model/vnd.collada+xml", + "dwf": "model/vnd.dwf", + "gdl": "model/vnd.gdl", + "gtw": "model/vnd.gtw", + "mts": "model/vnd.mts", + "vtu": "model/vnd.vtu", + "wrl": "model/vrml", + "vrml": "model/vrml", + "x3db": "model/x3d+binary", + "x3dbz": "model/x3d+binary", + "x3dv": "model/x3d+vrml", + "x3dvz": "model/x3d+vrml", + "x3d": "model/x3d+xml", + "x3dz": "model/x3d+xml", + "appcache": "text/cache-manifest", + "ics": "text/calendar", + "ifb": "text/calendar", + "css": "text/css", + "csv": "text/csv", + "html": 
"text/html", + "htm": "text/html", + "n3": "text/n3", + "txt": "text/plain", + "text": "text/plain", + "conf": "text/plain", + "def": "text/plain", + "list": "text/plain", + "log": "text/plain", + "in": "text/plain", + "dsc": "text/prs.lines.tag", + "rtx": "text/richtext", + "sgml": "text/sgml", + "sgm": "text/sgml", + "tsv": "text/tab-separated-values", + "t": "text/troff", + "tr": "text/troff", + "roff": "text/troff", + "man": "text/troff", + "me": "text/troff", + "ms": "text/troff", + "ttl": "text/turtle", + "uri": "text/uri-list", + "uris": "text/uri-list", + "urls": "text/uri-list", + "vcard": "text/vcard", + "curl": "text/vnd.curl", + "dcurl": "text/vnd.curl.dcurl", + "scurl": "text/vnd.curl.scurl", + "mcurl": "text/vnd.curl.mcurl", + "fly": "text/vnd.fly", + "flx": "text/vnd.fmi.flexstor", + "gv": "text/vnd.graphviz", + "3dml": "text/vnd.in3d.3dml", + "spot": "text/vnd.in3d.spot", + "jad": "text/vnd.sun.j2me.app-descriptor", + "wml": "text/vnd.wap.wml", + "wmls": "text/vnd.wap.wmlscript", + "s": "text/x-asm", + "asm": "text/x-asm", + "c": "text/x-c", + "cc": "text/x-c", + "cxx": "text/x-c", + "cpp": "text/x-c", + "h": "text/x-c", + "hh": "text/x-c", + "dic": "text/x-c", + "f": "text/x-fortran", + "for": "text/x-fortran", + "f77": "text/x-fortran", + "f90": "text/x-fortran", + "java": "text/x-java-source", + "opml": "text/x-opml", + "p": "text/x-pascal", + "pas": "text/x-pascal", + "nfo": "text/x-nfo", + "etx": "text/x-setext", + "sfv": "text/x-sfv", + "uu": "text/x-uuencode", + "vcs": "text/x-vcalendar", + "vcf": "text/x-vcard", + "3gp": "video/3gpp", + "3g2": "video/3gpp2", + "h261": "video/h261", + "h263": "video/h263", + "h264": "video/h264", + "jpgv": "video/jpeg", + "jpm": "video/jpm", + "jpgm": "video/jpm", + "mj2": "video/mj2", + "mjp2": "video/mj2", + "mp4": "video/mp4", + "mp4v": "video/mp4", + "mpg4": "video/mp4", + "mpeg": "video/mpeg", + "mpg": "video/mpeg", + "mpe": "video/mpeg", + "m1v": "video/mpeg", + "m2v": "video/mpeg", + "ogv": 
"video/ogg", + "qt": "video/quicktime", + "mov": "video/quicktime", + "uvh": "video/vnd.dece.hd", + "uvvh": "video/vnd.dece.hd", + "uvm": "video/vnd.dece.mobile", + "uvvm": "video/vnd.dece.mobile", + "uvp": "video/vnd.dece.pd", + "uvvp": "video/vnd.dece.pd", + "uvs": "video/vnd.dece.sd", + "uvvs": "video/vnd.dece.sd", + "uvv": "video/vnd.dece.video", + "uvvv": "video/vnd.dece.video", + "dvb": "video/vnd.dvb.file", + "fvt": "video/vnd.fvt", + "mxu": "video/vnd.mpegurl", + "m4u": "video/vnd.mpegurl", + "pyv": "video/vnd.ms-playready.media.pyv", + "uvu": "video/vnd.uvvu.mp4", + "uvvu": "video/vnd.uvvu.mp4", + "viv": "video/vnd.vivo", + "webm": "video/webm", + "f4v": "video/x-f4v", + "fli": "video/x-fli", + "flv": "video/x-flv", + "m4v": "video/x-m4v", + "mkv": "video/x-matroska", + "mk3d": "video/x-matroska", + "mks": "video/x-matroska", + "mng": "video/x-mng", + "asf": "video/x-ms-asf", + "asx": "video/x-ms-asf", + "vob": "video/x-ms-vob", + "wm": "video/x-ms-wm", + "wmv": "video/x-ms-wmv", + "wmx": "video/x-ms-wmx", + "wvx": "video/x-ms-wvx", + "avi": "video/x-msvideo", + "movie": "video/x-sgi-movie", + "smv": "video/x-smv", + "ice": "x-conference/x-cooltalk", + "vtt": "text/vtt", + "crx": "application/x-chrome-extension", + "htc": "text/x-component", + "manifest": "text/cache-manifest", + "buffer": "application/octet-stream", + "m4p": "application/mp4", + "m4a": "audio/mp4", + "ts": "video/MP2T", + "event-stream": "text/event-stream", + "webapp": "application/x-web-app-manifest+json", + "lua": "text/x-lua", + "luac": "application/x-lua-bytecode", + "markdown": "text/x-markdown", + "md": "text/x-markdown", + "mkd": "text/x-markdown" +} + , extensions: { + "application/andrew-inset": "ez", + "application/applixware": "aw", + "application/atom+xml": "atom", + "application/atomcat+xml": "atomcat", + "application/atomsvc+xml": "atomsvc", + "application/ccxml+xml": "ccxml", + "application/cdmi-capability": "cdmia", + "application/cdmi-container": "cdmic", + 
"application/cdmi-domain": "cdmid", + "application/cdmi-object": "cdmio", + "application/cdmi-queue": "cdmiq", + "application/cu-seeme": "cu", + "application/davmount+xml": "davmount", + "application/docbook+xml": "dbk", + "application/dssc+der": "dssc", + "application/dssc+xml": "xdssc", + "application/ecmascript": "ecma", + "application/emma+xml": "emma", + "application/epub+zip": "epub", + "application/exi": "exi", + "application/font-tdpfr": "pfr", + "application/gml+xml": "gml", + "application/gpx+xml": "gpx", + "application/gxf": "gxf", + "application/hyperstudio": "stk", + "application/inkml+xml": "ink", + "application/ipfix": "ipfix", + "application/java-archive": "jar", + "application/java-serialized-object": "ser", + "application/java-vm": "class", + "application/javascript": "js", + "application/json": "json", + "application/jsonml+json": "jsonml", + "application/lost+xml": "lostxml", + "application/mac-binhex40": "hqx", + "application/mac-compactpro": "cpt", + "application/mads+xml": "mads", + "application/marc": "mrc", + "application/marcxml+xml": "mrcx", + "application/mathematica": "ma", + "application/mathml+xml": "mathml", + "application/mbox": "mbox", + "application/mediaservercontrol+xml": "mscml", + "application/metalink+xml": "metalink", + "application/metalink4+xml": "meta4", + "application/mets+xml": "mets", + "application/mods+xml": "mods", + "application/mp21": "m21", + "application/mp4": "mp4s", + "application/msword": "doc", + "application/mxf": "mxf", + "application/octet-stream": "bin", + "application/oda": "oda", + "application/oebps-package+xml": "opf", + "application/ogg": "ogx", + "application/omdoc+xml": "omdoc", + "application/onenote": "onetoc", + "application/oxps": "oxps", + "application/patch-ops-error+xml": "xer", + "application/pdf": "pdf", + "application/pgp-encrypted": "pgp", + "application/pgp-signature": "asc", + "application/pics-rules": "prf", + "application/pkcs10": "p10", + "application/pkcs7-mime": "p7m", + 
"application/pkcs7-signature": "p7s", + "application/pkcs8": "p8", + "application/pkix-attr-cert": "ac", + "application/pkix-cert": "cer", + "application/pkix-crl": "crl", + "application/pkix-pkipath": "pkipath", + "application/pkixcmp": "pki", + "application/pls+xml": "pls", + "application/postscript": "ai", + "application/prs.cww": "cww", + "application/pskc+xml": "pskcxml", + "application/rdf+xml": "rdf", + "application/reginfo+xml": "rif", + "application/relax-ng-compact-syntax": "rnc", + "application/resource-lists+xml": "rl", + "application/resource-lists-diff+xml": "rld", + "application/rls-services+xml": "rs", + "application/rpki-ghostbusters": "gbr", + "application/rpki-manifest": "mft", + "application/rpki-roa": "roa", + "application/rsd+xml": "rsd", + "application/rss+xml": "rss", + "application/rtf": "rtf", + "application/sbml+xml": "sbml", + "application/scvp-cv-request": "scq", + "application/scvp-cv-response": "scs", + "application/scvp-vp-request": "spq", + "application/scvp-vp-response": "spp", + "application/sdp": "sdp", + "application/set-payment-initiation": "setpay", + "application/set-registration-initiation": "setreg", + "application/shf+xml": "shf", + "application/smil+xml": "smi", + "application/sparql-query": "rq", + "application/sparql-results+xml": "srx", + "application/srgs": "gram", + "application/srgs+xml": "grxml", + "application/sru+xml": "sru", + "application/ssdl+xml": "ssdl", + "application/ssml+xml": "ssml", + "application/tei+xml": "tei", + "application/thraud+xml": "tfi", + "application/timestamped-data": "tsd", + "application/vnd.3gpp.pic-bw-large": "plb", + "application/vnd.3gpp.pic-bw-small": "psb", + "application/vnd.3gpp.pic-bw-var": "pvb", + "application/vnd.3gpp2.tcap": "tcap", + "application/vnd.3m.post-it-notes": "pwn", + "application/vnd.accpac.simply.aso": "aso", + "application/vnd.accpac.simply.imp": "imp", + "application/vnd.acucobol": "acu", + "application/vnd.acucorp": "atc", + 
"application/vnd.adobe.air-application-installer-package+zip": "air", + "application/vnd.adobe.formscentral.fcdt": "fcdt", + "application/vnd.adobe.fxp": "fxp", + "application/vnd.adobe.xdp+xml": "xdp", + "application/vnd.adobe.xfdf": "xfdf", + "application/vnd.ahead.space": "ahead", + "application/vnd.airzip.filesecure.azf": "azf", + "application/vnd.airzip.filesecure.azs": "azs", + "application/vnd.amazon.ebook": "azw", + "application/vnd.americandynamics.acc": "acc", + "application/vnd.amiga.ami": "ami", + "application/vnd.android.package-archive": "apk", + "application/vnd.anser-web-certificate-issue-initiation": "cii", + "application/vnd.anser-web-funds-transfer-initiation": "fti", + "application/vnd.antix.game-component": "atx", + "application/vnd.apple.installer+xml": "mpkg", + "application/vnd.apple.mpegurl": "m3u8", + "application/vnd.aristanetworks.swi": "swi", + "application/vnd.astraea-software.iota": "iota", + "application/vnd.audiograph": "aep", + "application/vnd.blueice.multipass": "mpm", + "application/vnd.bmi": "bmi", + "application/vnd.businessobjects": "rep", + "application/vnd.chemdraw+xml": "cdxml", + "application/vnd.chipnuts.karaoke-mmd": "mmd", + "application/vnd.cinderella": "cdy", + "application/vnd.claymore": "cla", + "application/vnd.cloanto.rp9": "rp9", + "application/vnd.clonk.c4group": "c4g", + "application/vnd.cluetrust.cartomobile-config": "c11amc", + "application/vnd.cluetrust.cartomobile-config-pkg": "c11amz", + "application/vnd.commonspace": "csp", + "application/vnd.contact.cmsg": "cdbcmsg", + "application/vnd.cosmocaller": "cmc", + "application/vnd.crick.clicker": "clkx", + "application/vnd.crick.clicker.keyboard": "clkk", + "application/vnd.crick.clicker.palette": "clkp", + "application/vnd.crick.clicker.template": "clkt", + "application/vnd.crick.clicker.wordbank": "clkw", + "application/vnd.criticaltools.wbs+xml": "wbs", + "application/vnd.ctc-posml": "pml", + "application/vnd.cups-ppd": "ppd", + "application/vnd.curl.car": 
"car", + "application/vnd.curl.pcurl": "pcurl", + "application/vnd.dart": "dart", + "application/vnd.data-vision.rdz": "rdz", + "application/vnd.dece.data": "uvf", + "application/vnd.dece.ttml+xml": "uvt", + "application/vnd.dece.unspecified": "uvx", + "application/vnd.dece.zip": "uvz", + "application/vnd.denovo.fcselayout-link": "fe_launch", + "application/vnd.dna": "dna", + "application/vnd.dolby.mlp": "mlp", + "application/vnd.dpgraph": "dpg", + "application/vnd.dreamfactory": "dfac", + "application/vnd.ds-keypoint": "kpxx", + "application/vnd.dvb.ait": "ait", + "application/vnd.dvb.service": "svc", + "application/vnd.dynageo": "geo", + "application/vnd.ecowin.chart": "mag", + "application/vnd.enliven": "nml", + "application/vnd.epson.esf": "esf", + "application/vnd.epson.msf": "msf", + "application/vnd.epson.quickanime": "qam", + "application/vnd.epson.salt": "slt", + "application/vnd.epson.ssf": "ssf", + "application/vnd.eszigno3+xml": "es3", + "application/vnd.ezpix-album": "ez2", + "application/vnd.ezpix-package": "ez3", + "application/vnd.fdf": "fdf", + "application/vnd.fdsn.mseed": "mseed", + "application/vnd.fdsn.seed": "seed", + "application/vnd.flographit": "gph", + "application/vnd.fluxtime.clip": "ftc", + "application/vnd.framemaker": "fm", + "application/vnd.frogans.fnc": "fnc", + "application/vnd.frogans.ltf": "ltf", + "application/vnd.fsc.weblaunch": "fsc", + "application/vnd.fujitsu.oasys": "oas", + "application/vnd.fujitsu.oasys2": "oa2", + "application/vnd.fujitsu.oasys3": "oa3", + "application/vnd.fujitsu.oasysgp": "fg5", + "application/vnd.fujitsu.oasysprs": "bh2", + "application/vnd.fujixerox.ddd": "ddd", + "application/vnd.fujixerox.docuworks": "xdw", + "application/vnd.fujixerox.docuworks.binder": "xbd", + "application/vnd.fuzzysheet": "fzs", + "application/vnd.genomatix.tuxedo": "txd", + "application/vnd.geogebra.file": "ggb", + "application/vnd.geogebra.tool": "ggt", + "application/vnd.geometry-explorer": "gex", + 
"application/vnd.geonext": "gxt", + "application/vnd.geoplan": "g2w", + "application/vnd.geospace": "g3w", + "application/vnd.gmx": "gmx", + "application/vnd.google-earth.kml+xml": "kml", + "application/vnd.google-earth.kmz": "kmz", + "application/vnd.grafeq": "gqf", + "application/vnd.groove-account": "gac", + "application/vnd.groove-help": "ghf", + "application/vnd.groove-identity-message": "gim", + "application/vnd.groove-injector": "grv", + "application/vnd.groove-tool-message": "gtm", + "application/vnd.groove-tool-template": "tpl", + "application/vnd.groove-vcard": "vcg", + "application/vnd.hal+xml": "hal", + "application/vnd.handheld-entertainment+xml": "zmm", + "application/vnd.hbci": "hbci", + "application/vnd.hhe.lesson-player": "les", + "application/vnd.hp-hpgl": "hpgl", + "application/vnd.hp-hpid": "hpid", + "application/vnd.hp-hps": "hps", + "application/vnd.hp-jlyt": "jlt", + "application/vnd.hp-pcl": "pcl", + "application/vnd.hp-pclxl": "pclxl", + "application/vnd.hydrostatix.sof-data": "sfd-hdstx", + "application/vnd.ibm.minipay": "mpy", + "application/vnd.ibm.modcap": "afp", + "application/vnd.ibm.rights-management": "irm", + "application/vnd.ibm.secure-container": "sc", + "application/vnd.iccprofile": "icc", + "application/vnd.igloader": "igl", + "application/vnd.immervision-ivp": "ivp", + "application/vnd.immervision-ivu": "ivu", + "application/vnd.insors.igm": "igm", + "application/vnd.intercon.formnet": "xpw", + "application/vnd.intergeo": "i2g", + "application/vnd.intu.qbo": "qbo", + "application/vnd.intu.qfx": "qfx", + "application/vnd.ipunplugged.rcprofile": "rcprofile", + "application/vnd.irepository.package+xml": "irp", + "application/vnd.is-xpr": "xpr", + "application/vnd.isac.fcs": "fcs", + "application/vnd.jam": "jam", + "application/vnd.jcp.javame.midlet-rms": "rms", + "application/vnd.jisp": "jisp", + "application/vnd.joost.joda-archive": "joda", + "application/vnd.kahootz": "ktz", + "application/vnd.kde.karbon": "karbon", + 
"application/vnd.kde.kchart": "chrt", + "application/vnd.kde.kformula": "kfo", + "application/vnd.kde.kivio": "flw", + "application/vnd.kde.kontour": "kon", + "application/vnd.kde.kpresenter": "kpr", + "application/vnd.kde.kspread": "ksp", + "application/vnd.kde.kword": "kwd", + "application/vnd.kenameaapp": "htke", + "application/vnd.kidspiration": "kia", + "application/vnd.kinar": "kne", + "application/vnd.koan": "skp", + "application/vnd.kodak-descriptor": "sse", + "application/vnd.las.las+xml": "lasxml", + "application/vnd.llamagraphics.life-balance.desktop": "lbd", + "application/vnd.llamagraphics.life-balance.exchange+xml": "lbe", + "application/vnd.lotus-1-2-3": "123", + "application/vnd.lotus-approach": "apr", + "application/vnd.lotus-freelance": "pre", + "application/vnd.lotus-notes": "nsf", + "application/vnd.lotus-organizer": "org", + "application/vnd.lotus-screencam": "scm", + "application/vnd.lotus-wordpro": "lwp", + "application/vnd.macports.portpkg": "portpkg", + "application/vnd.mcd": "mcd", + "application/vnd.medcalcdata": "mc1", + "application/vnd.mediastation.cdkey": "cdkey", + "application/vnd.mfer": "mwf", + "application/vnd.mfmp": "mfm", + "application/vnd.micrografx.flo": "flo", + "application/vnd.micrografx.igx": "igx", + "application/vnd.mif": "mif", + "application/vnd.mobius.daf": "daf", + "application/vnd.mobius.dis": "dis", + "application/vnd.mobius.mbk": "mbk", + "application/vnd.mobius.mqy": "mqy", + "application/vnd.mobius.msl": "msl", + "application/vnd.mobius.plc": "plc", + "application/vnd.mobius.txf": "txf", + "application/vnd.mophun.application": "mpn", + "application/vnd.mophun.certificate": "mpc", + "application/vnd.mozilla.xul+xml": "xul", + "application/vnd.ms-artgalry": "cil", + "application/vnd.ms-cab-compressed": "cab", + "application/vnd.ms-excel": "xls", + "application/vnd.ms-excel.addin.macroenabled.12": "xlam", + "application/vnd.ms-excel.sheet.binary.macroenabled.12": "xlsb", + 
"application/vnd.ms-excel.sheet.macroenabled.12": "xlsm", + "application/vnd.ms-excel.template.macroenabled.12": "xltm", + "application/vnd.ms-fontobject": "eot", + "application/vnd.ms-htmlhelp": "chm", + "application/vnd.ms-ims": "ims", + "application/vnd.ms-lrm": "lrm", + "application/vnd.ms-officetheme": "thmx", + "application/vnd.ms-pki.seccat": "cat", + "application/vnd.ms-pki.stl": "stl", + "application/vnd.ms-powerpoint": "ppt", + "application/vnd.ms-powerpoint.addin.macroenabled.12": "ppam", + "application/vnd.ms-powerpoint.presentation.macroenabled.12": "pptm", + "application/vnd.ms-powerpoint.slide.macroenabled.12": "sldm", + "application/vnd.ms-powerpoint.slideshow.macroenabled.12": "ppsm", + "application/vnd.ms-powerpoint.template.macroenabled.12": "potm", + "application/vnd.ms-project": "mpp", + "application/vnd.ms-word.document.macroenabled.12": "docm", + "application/vnd.ms-word.template.macroenabled.12": "dotm", + "application/vnd.ms-works": "wps", + "application/vnd.ms-wpl": "wpl", + "application/vnd.ms-xpsdocument": "xps", + "application/vnd.mseq": "mseq", + "application/vnd.musician": "mus", + "application/vnd.muvee.style": "msty", + "application/vnd.mynfc": "taglet", + "application/vnd.neurolanguage.nlu": "nlu", + "application/vnd.nitf": "ntf", + "application/vnd.noblenet-directory": "nnd", + "application/vnd.noblenet-sealer": "nns", + "application/vnd.noblenet-web": "nnw", + "application/vnd.nokia.n-gage.data": "ngdat", + "application/vnd.nokia.n-gage.symbian.install": "n-gage", + "application/vnd.nokia.radio-preset": "rpst", + "application/vnd.nokia.radio-presets": "rpss", + "application/vnd.novadigm.edm": "edm", + "application/vnd.novadigm.edx": "edx", + "application/vnd.novadigm.ext": "ext", + "application/vnd.oasis.opendocument.chart": "odc", + "application/vnd.oasis.opendocument.chart-template": "otc", + "application/vnd.oasis.opendocument.database": "odb", + "application/vnd.oasis.opendocument.formula": "odf", + 
"application/vnd.oasis.opendocument.formula-template": "odft", + "application/vnd.oasis.opendocument.graphics": "odg", + "application/vnd.oasis.opendocument.graphics-template": "otg", + "application/vnd.oasis.opendocument.image": "odi", + "application/vnd.oasis.opendocument.image-template": "oti", + "application/vnd.oasis.opendocument.presentation": "odp", + "application/vnd.oasis.opendocument.presentation-template": "otp", + "application/vnd.oasis.opendocument.spreadsheet": "ods", + "application/vnd.oasis.opendocument.spreadsheet-template": "ots", + "application/vnd.oasis.opendocument.text": "odt", + "application/vnd.oasis.opendocument.text-master": "odm", + "application/vnd.oasis.opendocument.text-template": "ott", + "application/vnd.oasis.opendocument.text-web": "oth", + "application/vnd.olpc-sugar": "xo", + "application/vnd.oma.dd2+xml": "dd2", + "application/vnd.openofficeorg.extension": "oxt", + "application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx", + "application/vnd.openxmlformats-officedocument.presentationml.slide": "sldx", + "application/vnd.openxmlformats-officedocument.presentationml.slideshow": "ppsx", + "application/vnd.openxmlformats-officedocument.presentationml.template": "potx", + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx", + "application/vnd.openxmlformats-officedocument.spreadsheetml.template": "xltx", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx", + "application/vnd.openxmlformats-officedocument.wordprocessingml.template": "dotx", + "application/vnd.osgeo.mapguide.package": "mgp", + "application/vnd.osgi.dp": "dp", + "application/vnd.osgi.subsystem": "esa", + "application/vnd.palm": "pdb", + "application/vnd.pawaafile": "paw", + "application/vnd.pg.format": "str", + "application/vnd.pg.osasli": "ei6", + "application/vnd.picsel": "efif", + "application/vnd.pmi.widget": "wg", + "application/vnd.pocketlearn": "plf", + 
"application/vnd.powerbuilder6": "pbd", + "application/vnd.previewsystems.box": "box", + "application/vnd.proteus.magazine": "mgz", + "application/vnd.publishare-delta-tree": "qps", + "application/vnd.pvi.ptid1": "ptid", + "application/vnd.quark.quarkxpress": "qxd", + "application/vnd.realvnc.bed": "bed", + "application/vnd.recordare.musicxml": "mxl", + "application/vnd.recordare.musicxml+xml": "musicxml", + "application/vnd.rig.cryptonote": "cryptonote", + "application/vnd.rim.cod": "cod", + "application/vnd.rn-realmedia": "rm", + "application/vnd.rn-realmedia-vbr": "rmvb", + "application/vnd.route66.link66+xml": "link66", + "application/vnd.sailingtracker.track": "st", + "application/vnd.seemail": "see", + "application/vnd.sema": "sema", + "application/vnd.semd": "semd", + "application/vnd.semf": "semf", + "application/vnd.shana.informed.formdata": "ifm", + "application/vnd.shana.informed.formtemplate": "itp", + "application/vnd.shana.informed.interchange": "iif", + "application/vnd.shana.informed.package": "ipk", + "application/vnd.simtech-mindmapper": "twd", + "application/vnd.smaf": "mmf", + "application/vnd.smart.teacher": "teacher", + "application/vnd.solent.sdkm+xml": "sdkm", + "application/vnd.spotfire.dxp": "dxp", + "application/vnd.spotfire.sfs": "sfs", + "application/vnd.stardivision.calc": "sdc", + "application/vnd.stardivision.draw": "sda", + "application/vnd.stardivision.impress": "sdd", + "application/vnd.stardivision.math": "smf", + "application/vnd.stardivision.writer": "sdw", + "application/vnd.stardivision.writer-global": "sgl", + "application/vnd.stepmania.package": "smzip", + "application/vnd.stepmania.stepchart": "sm", + "application/vnd.sun.xml.calc": "sxc", + "application/vnd.sun.xml.calc.template": "stc", + "application/vnd.sun.xml.draw": "sxd", + "application/vnd.sun.xml.draw.template": "std", + "application/vnd.sun.xml.impress": "sxi", + "application/vnd.sun.xml.impress.template": "sti", + "application/vnd.sun.xml.math": "sxm", + 
"application/vnd.sun.xml.writer": "sxw", + "application/vnd.sun.xml.writer.global": "sxg", + "application/vnd.sun.xml.writer.template": "stw", + "application/vnd.sus-calendar": "sus", + "application/vnd.svd": "svd", + "application/vnd.symbian.install": "sis", + "application/vnd.syncml+xml": "xsm", + "application/vnd.syncml.dm+wbxml": "bdm", + "application/vnd.syncml.dm+xml": "xdm", + "application/vnd.tao.intent-module-archive": "tao", + "application/vnd.tcpdump.pcap": "pcap", + "application/vnd.tmobile-livetv": "tmo", + "application/vnd.trid.tpt": "tpt", + "application/vnd.triscape.mxs": "mxs", + "application/vnd.trueapp": "tra", + "application/vnd.ufdl": "ufd", + "application/vnd.uiq.theme": "utz", + "application/vnd.umajin": "umj", + "application/vnd.unity": "unityweb", + "application/vnd.uoml+xml": "uoml", + "application/vnd.vcx": "vcx", + "application/vnd.visio": "vsd", + "application/vnd.visionary": "vis", + "application/vnd.vsf": "vsf", + "application/vnd.wap.wbxml": "wbxml", + "application/vnd.wap.wmlc": "wmlc", + "application/vnd.wap.wmlscriptc": "wmlsc", + "application/vnd.webturbo": "wtb", + "application/vnd.wolfram.player": "nbp", + "application/vnd.wordperfect": "wpd", + "application/vnd.wqd": "wqd", + "application/vnd.wt.stf": "stf", + "application/vnd.xara": "xar", + "application/vnd.xfdl": "xfdl", + "application/vnd.yamaha.hv-dic": "hvd", + "application/vnd.yamaha.hv-script": "hvs", + "application/vnd.yamaha.hv-voice": "hvp", + "application/vnd.yamaha.openscoreformat": "osf", + "application/vnd.yamaha.openscoreformat.osfpvg+xml": "osfpvg", + "application/vnd.yamaha.smaf-audio": "saf", + "application/vnd.yamaha.smaf-phrase": "spf", + "application/vnd.yellowriver-custom-menu": "cmp", + "application/vnd.zul": "zir", + "application/vnd.zzazz.deck+xml": "zaz", + "application/voicexml+xml": "vxml", + "application/widget": "wgt", + "application/winhlp": "hlp", + "application/wsdl+xml": "wsdl", + "application/wspolicy+xml": "wspolicy", + 
"application/x-7z-compressed": "7z", + "application/x-abiword": "abw", + "application/x-ace-compressed": "ace", + "application/x-apple-diskimage": "dmg", + "application/x-authorware-bin": "aab", + "application/x-authorware-map": "aam", + "application/x-authorware-seg": "aas", + "application/x-bcpio": "bcpio", + "application/x-bittorrent": "torrent", + "application/x-blorb": "blb", + "application/x-bzip": "bz", + "application/x-bzip2": "bz2", + "application/x-cbr": "cbr", + "application/x-cdlink": "vcd", + "application/x-cfs-compressed": "cfs", + "application/x-chat": "chat", + "application/x-chess-pgn": "pgn", + "application/x-conference": "nsc", + "application/x-cpio": "cpio", + "application/x-csh": "csh", + "application/x-debian-package": "deb", + "application/x-dgc-compressed": "dgc", + "application/x-director": "dir", + "application/x-doom": "wad", + "application/x-dtbncx+xml": "ncx", + "application/x-dtbook+xml": "dtb", + "application/x-dtbresource+xml": "res", + "application/x-dvi": "dvi", + "application/x-envoy": "evy", + "application/x-eva": "eva", + "application/x-font-bdf": "bdf", + "application/x-font-ghostscript": "gsf", + "application/x-font-linux-psf": "psf", + "application/x-font-otf": "otf", + "application/x-font-pcf": "pcf", + "application/x-font-snf": "snf", + "application/x-font-ttf": "ttf", + "application/x-font-type1": "pfa", + "application/x-font-woff": "woff", + "application/x-freearc": "arc", + "application/x-futuresplash": "spl", + "application/x-gca-compressed": "gca", + "application/x-glulx": "ulx", + "application/x-gnumeric": "gnumeric", + "application/x-gramps-xml": "gramps", + "application/x-gtar": "gtar", + "application/x-hdf": "hdf", + "application/x-install-instructions": "install", + "application/x-iso9660-image": "iso", + "application/x-java-jnlp-file": "jnlp", + "application/x-latex": "latex", + "application/x-lzh-compressed": "lzh", + "application/x-mie": "mie", + "application/x-mobipocket-ebook": "prc", + 
"application/x-ms-application": "application", + "application/x-ms-shortcut": "lnk", + "application/x-ms-wmd": "wmd", + "application/x-ms-wmz": "wmz", + "application/x-ms-xbap": "xbap", + "application/x-msaccess": "mdb", + "application/x-msbinder": "obd", + "application/x-mscardfile": "crd", + "application/x-msclip": "clp", + "application/x-msdownload": "exe", + "application/x-msmediaview": "mvb", + "application/x-msmetafile": "wmf", + "application/x-msmoney": "mny", + "application/x-mspublisher": "pub", + "application/x-msschedule": "scd", + "application/x-msterminal": "trm", + "application/x-mswrite": "wri", + "application/x-netcdf": "nc", + "application/x-nzb": "nzb", + "application/x-pkcs12": "p12", + "application/x-pkcs7-certificates": "p7b", + "application/x-pkcs7-certreqresp": "p7r", + "application/x-rar-compressed": "rar", + "application/x-research-info-systems": "ris", + "application/x-sh": "sh", + "application/x-shar": "shar", + "application/x-shockwave-flash": "swf", + "application/x-silverlight-app": "xap", + "application/x-sql": "sql", + "application/x-stuffit": "sit", + "application/x-stuffitx": "sitx", + "application/x-subrip": "srt", + "application/x-sv4cpio": "sv4cpio", + "application/x-sv4crc": "sv4crc", + "application/x-t3vm-image": "t3", + "application/x-tads": "gam", + "application/x-tar": "tar", + "application/x-tcl": "tcl", + "application/x-tex": "tex", + "application/x-tex-tfm": "tfm", + "application/x-texinfo": "texinfo", + "application/x-tgif": "obj", + "application/x-ustar": "ustar", + "application/x-wais-source": "src", + "application/x-x509-ca-cert": "der", + "application/x-xfig": "fig", + "application/x-xliff+xml": "xlf", + "application/x-xpinstall": "xpi", + "application/x-xz": "xz", + "application/x-zmachine": "z1", + "application/xaml+xml": "xaml", + "application/xcap-diff+xml": "xdf", + "application/xenc+xml": "xenc", + "application/xhtml+xml": "xhtml", + "application/xml": "xml", + "application/xml-dtd": "dtd", + 
"application/xop+xml": "xop", + "application/xproc+xml": "xpl", + "application/xslt+xml": "xslt", + "application/xspf+xml": "xspf", + "application/xv+xml": "mxml", + "application/yang": "yang", + "application/yin+xml": "yin", + "application/zip": "zip", + "audio/adpcm": "adp", + "audio/basic": "au", + "audio/midi": "mid", + "audio/mp4": "mp4a", + "audio/mpeg": "mpga", + "audio/ogg": "oga", + "audio/s3m": "s3m", + "audio/silk": "sil", + "audio/vnd.dece.audio": "uva", + "audio/vnd.digital-winds": "eol", + "audio/vnd.dra": "dra", + "audio/vnd.dts": "dts", + "audio/vnd.dts.hd": "dtshd", + "audio/vnd.lucent.voice": "lvp", + "audio/vnd.ms-playready.media.pya": "pya", + "audio/vnd.nuera.ecelp4800": "ecelp4800", + "audio/vnd.nuera.ecelp7470": "ecelp7470", + "audio/vnd.nuera.ecelp9600": "ecelp9600", + "audio/vnd.rip": "rip", + "audio/webm": "weba", + "audio/x-aac": "aac", + "audio/x-aiff": "aif", + "audio/x-caf": "caf", + "audio/x-flac": "flac", + "audio/x-matroska": "mka", + "audio/x-mpegurl": "m3u", + "audio/x-ms-wax": "wax", + "audio/x-ms-wma": "wma", + "audio/x-pn-realaudio": "ram", + "audio/x-pn-realaudio-plugin": "rmp", + "audio/x-wav": "wav", + "audio/xm": "xm", + "chemical/x-cdx": "cdx", + "chemical/x-cif": "cif", + "chemical/x-cmdf": "cmdf", + "chemical/x-cml": "cml", + "chemical/x-csml": "csml", + "chemical/x-xyz": "xyz", + "image/bmp": "bmp", + "image/cgm": "cgm", + "image/g3fax": "g3", + "image/gif": "gif", + "image/ief": "ief", + "image/jpeg": "jpeg", + "image/ktx": "ktx", + "image/png": "png", + "image/prs.btif": "btif", + "image/sgi": "sgi", + "image/svg+xml": "svg", + "image/tiff": "tiff", + "image/vnd.adobe.photoshop": "psd", + "image/vnd.dece.graphic": "uvi", + "image/vnd.dvb.subtitle": "sub", + "image/vnd.djvu": "djvu", + "image/vnd.dwg": "dwg", + "image/vnd.dxf": "dxf", + "image/vnd.fastbidsheet": "fbs", + "image/vnd.fpx": "fpx", + "image/vnd.fst": "fst", + "image/vnd.fujixerox.edmics-mmr": "mmr", + "image/vnd.fujixerox.edmics-rlc": "rlc", + 
"image/vnd.ms-modi": "mdi", + "image/vnd.ms-photo": "wdp", + "image/vnd.net-fpx": "npx", + "image/vnd.wap.wbmp": "wbmp", + "image/vnd.xiff": "xif", + "image/webp": "webp", + "image/x-3ds": "3ds", + "image/x-cmu-raster": "ras", + "image/x-cmx": "cmx", + "image/x-freehand": "fh", + "image/x-icon": "ico", + "image/x-mrsid-image": "sid", + "image/x-pcx": "pcx", + "image/x-pict": "pic", + "image/x-portable-anymap": "pnm", + "image/x-portable-bitmap": "pbm", + "image/x-portable-graymap": "pgm", + "image/x-portable-pixmap": "ppm", + "image/x-rgb": "rgb", + "image/x-tga": "tga", + "image/x-xbitmap": "xbm", + "image/x-xpixmap": "xpm", + "image/x-xwindowdump": "xwd", + "message/rfc822": "eml", + "model/iges": "igs", + "model/mesh": "msh", + "model/vnd.collada+xml": "dae", + "model/vnd.dwf": "dwf", + "model/vnd.gdl": "gdl", + "model/vnd.gtw": "gtw", + "model/vnd.mts": "mts", + "model/vnd.vtu": "vtu", + "model/vrml": "wrl", + "model/x3d+binary": "x3db", + "model/x3d+vrml": "x3dv", + "model/x3d+xml": "x3d", + "text/cache-manifest": "appcache", + "text/calendar": "ics", + "text/css": "css", + "text/csv": "csv", + "text/html": "html", + "text/n3": "n3", + "text/plain": "txt", + "text/prs.lines.tag": "dsc", + "text/richtext": "rtx", + "text/sgml": "sgml", + "text/tab-separated-values": "tsv", + "text/troff": "t", + "text/turtle": "ttl", + "text/uri-list": "uri", + "text/vcard": "vcard", + "text/vnd.curl": "curl", + "text/vnd.curl.dcurl": "dcurl", + "text/vnd.curl.scurl": "scurl", + "text/vnd.curl.mcurl": "mcurl", + "text/vnd.dvb.subtitle": "sub", + "text/vnd.fly": "fly", + "text/vnd.fmi.flexstor": "flx", + "text/vnd.graphviz": "gv", + "text/vnd.in3d.3dml": "3dml", + "text/vnd.in3d.spot": "spot", + "text/vnd.sun.j2me.app-descriptor": "jad", + "text/vnd.wap.wml": "wml", + "text/vnd.wap.wmlscript": "wmls", + "text/x-asm": "s", + "text/x-c": "c", + "text/x-fortran": "f", + "text/x-java-source": "java", + "text/x-opml": "opml", + "text/x-pascal": "p", + "text/x-nfo": "nfo", + 
"text/x-setext": "etx", + "text/x-sfv": "sfv", + "text/x-uuencode": "uu", + "text/x-vcalendar": "vcs", + "text/x-vcard": "vcf", + "video/3gpp": "3gp", + "video/3gpp2": "3g2", + "video/h261": "h261", + "video/h263": "h263", + "video/h264": "h264", + "video/jpeg": "jpgv", + "video/jpm": "jpm", + "video/mj2": "mj2", + "video/mp4": "mp4", + "video/mpeg": "mpeg", + "video/ogg": "ogv", + "video/quicktime": "qt", + "video/vnd.dece.hd": "uvh", + "video/vnd.dece.mobile": "uvm", + "video/vnd.dece.pd": "uvp", + "video/vnd.dece.sd": "uvs", + "video/vnd.dece.video": "uvv", + "video/vnd.dvb.file": "dvb", + "video/vnd.fvt": "fvt", + "video/vnd.mpegurl": "mxu", + "video/vnd.ms-playready.media.pyv": "pyv", + "video/vnd.uvvu.mp4": "uvu", + "video/vnd.vivo": "viv", + "video/webm": "webm", + "video/x-f4v": "f4v", + "video/x-fli": "fli", + "video/x-flv": "flv", + "video/x-m4v": "m4v", + "video/x-matroska": "mkv", + "video/x-mng": "mng", + "video/x-ms-asf": "asf", + "video/x-ms-vob": "vob", + "video/x-ms-wm": "wm", + "video/x-ms-wmv": "wmv", + "video/x-ms-wmx": "wmx", + "video/x-ms-wvx": "wvx", + "video/x-msvideo": "avi", + "video/x-sgi-movie": "movie", + "video/x-smv": "smv", + "x-conference/x-cooltalk": "ice", + "text/vtt": "vtt", + "application/x-chrome-extension": "crx", + "text/x-component": "htc", + "video/MP2T": "ts", + "text/event-stream": "event-stream", + "application/x-web-app-manifest+json": "webapp", + "text/x-lua": "lua", + "application/x-lua-bytecode": "luac", + "text/x-markdown": "markdown" +} + , extension: function (mimeType) { + var type = mimeType.match(/^\s*([^;\s]*)(?:;|\s|$)/)[1].toLowerCase(); + return this.extensions[type]; +} + , define: function (map) { + for (var type in map) { + var exts = map[type]; + + for (var i = 0; i < exts.length; i++) { + if (false && this.types[exts]) { + console.warn(this._loading.replace(/.*\//, ''), 'changes "' + exts[i] + '" extension type from ' + + this.types[exts] + ' to ' + type); + } + + this.types[exts[i]] = type; + } + + 
// Default extension is the first one we encounter + if (!this.extensions[type]) { + this.extensions[type] = exts[0]; + } + } +} + , charsets: {lookup: function (mimeType, fallback) { + // Assume text types are utf8 + return (/^text\//).test(mimeType) ? 'UTF-8' : fallback; + }} +} +mime.types.constructor = undefined +mime.extensions.constructor = undefined \ No newline at end of file diff --git a/src/node_modules/browserify-mime/mime.js b/src/node_modules/browserify-mime/mime.js new file mode 100644 index 0000000..8a7eb09 --- /dev/null +++ b/src/node_modules/browserify-mime/mime.js @@ -0,0 +1,114 @@ +var path = require('path'); +var fs = require('fs'); + +function Mime() { + // Map of extension -> mime type + this.types = Object.create(null); + + // Map of mime type -> extension + this.extensions = Object.create(null); +} + +/** + * Define mimetype -> extension mappings. Each key is a mime-type that maps + * to an array of extensions associated with the type. The first extension is + * used as the default extension for the type. + * + * e.g. mime.define({'audio/ogg', ['oga', 'ogg', 'spx']}); + * + * @param map (Object) type definitions + */ +Mime.prototype.define = function (map) { + for (var type in map) { + var exts = map[type]; + + for (var i = 0; i < exts.length; i++) { + if (process.env.DEBUG_MIME && this.types[exts]) { + console.warn(this._loading.replace(/.*\//, ''), 'changes "' + exts[i] + '" extension type from ' + + this.types[exts] + ' to ' + type); + } + + this.types[exts[i]] = type; + } + + // Default extension is the first one we encounter + if (!this.extensions[type]) { + this.extensions[type] = exts[0]; + } + } +}; + +/** + * Load an Apache2-style ".types" file + * + * This may be called multiple times (it's expected). Where files declare + * overlapping types/extensions, the last file wins. + * + * @param file (String) path of file to load. 
+ */ +Mime.prototype.load = function(file) { + + this._loading = file; + // Read file and split into lines + var map = {}, + content = fs.readFileSync(file, 'ascii'), + lines = content.split(/[\r\n]+/); + + lines.forEach(function(line) { + // Clean up whitespace/comments, and split into fields + var fields = line.replace(/\s*#.*|^\s*|\s*$/g, '').split(/\s+/); + map[fields.shift()] = fields; + }); + + this.define(map); + + this._loading = null; +}; + +/** + * Lookup a mime type based on extension + */ +Mime.prototype.lookup = function(path, fallback) { + var ext = path.replace(/.*[\.\/]/, '').toLowerCase(); + + return this.types[ext] || fallback || this.default_type; +}; + +/** + * Return file extension associated with a mime type + */ +Mime.prototype.extension = function(mimeType) { + var type = mimeType.match(/^\s*([^;\s]*)(?:;|\s|$)/)[1].toLowerCase(); + return this.extensions[type]; +}; + +// Default instance +var mime = new Mime(); + +// Load local copy of +// http://svn.apache.org/repos/asf/httpd/httpd/trunk/docs/conf/mime.types +mime.load(path.join(__dirname, 'types/mime.types')); + +// Load additional types from node.js community +mime.load(path.join(__dirname, 'types/node.types')); + +// Default type +mime.default_type = mime.lookup('bin'); + +// +// Additional API specific to the default instance +// + +mime.Mime = Mime; + +/** + * Lookup a charset based on mime type. + */ +mime.charsets = { + lookup: function(mimeType, fallback) { + // Assume text types are utf8 + return (/^text\//).test(mimeType) ? 
'UTF-8' : fallback; + } +}; + +module.exports = mime; diff --git a/src/node_modules/browserify-mime/package.json b/src/node_modules/browserify-mime/package.json new file mode 100644 index 0000000..ccfd3d7 --- /dev/null +++ b/src/node_modules/browserify-mime/package.json @@ -0,0 +1,61 @@ +{ + "_args": [ + [ + "browserify-mime@1.2.9", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "browserify-mime@1.2.9", + "_id": "browserify-mime@1.2.9", + "_inBundle": false, + "_integrity": "sha1-rrGvKN5sDXpqLOQK22j/GEIq8x8=", + "_location": "/browserify-mime", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "browserify-mime@1.2.9", + "name": "browserify-mime", + "escapedName": "browserify-mime", + "rawSpec": "1.2.9", + "saveSpec": null, + "fetchSpec": "1.2.9" + }, + "_requiredBy": [ + "/azure-storage" + ], + "_resolved": "https://registry.npmjs.org/browserify-mime/-/browserify-mime-1.2.9.tgz", + "_spec": "1.2.9", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Robert Kieffer", + "email": "robert@broofa.com", + "url": "http://github.com/broofa" + }, + "bugs": { + "url": "https://github.com/broofa/node-mime/issues" + }, + "contributors": [ + { + "name": "Benjamin Thomas", + "email": "benjamin@benjaminthomas.org", + "url": "http://github.com/bentomas" + } + ], + "dependencies": {}, + "description": "A comprehensive library for mime-type mapping (with browserify support)", + "devDependencies": {}, + "homepage": "https://github.com/broofa/node-mime#readme", + "keywords": [ + "util", + "mime", + "browserify" + ], + "main": "browserify-mime.js", + "name": "browserify-mime", + "repository": { + "url": "git+https://github.com/broofa/node-mime.git", + "type": "git" + }, + "version": "1.2.9" +} diff --git a/src/node_modules/browserify-mime/test.js b/src/node_modules/browserify-mime/test.js new 
file mode 100644 index 0000000..1932ce7 --- /dev/null +++ b/src/node_modules/browserify-mime/test.js @@ -0,0 +1,62 @@ +/** + * Usage: node test.js + */ + +var mime = require('./browserify-mime'); +var assert = require('assert'); + +function eq(a, b) { + console.log('Test: ' + a + ' === ' + b); + assert.strictEqual.apply(null, arguments); +} + +console.log(Object.keys(mime.extensions).length + ' types'); +console.log(Object.keys(mime.types).length + ' extensions\n'); + +// +// Test mime lookups +// + +eq('text/plain', mime.lookup('text.txt')); +eq('text/plain', mime.lookup('.text.txt')); +eq('text/plain', mime.lookup('.txt')); +eq('text/plain', mime.lookup('txt')); +eq('application/octet-stream', mime.lookup('text.nope')); +eq('fallback', mime.lookup('text.fallback', 'fallback')); +eq('application/octet-stream', mime.lookup('constructor')); +eq('text/plain', mime.lookup('TEXT.TXT')); +eq('text/event-stream', mime.lookup('text/event-stream')); +eq('application/x-web-app-manifest+json', mime.lookup('text.webapp')); + +// +// Test extensions +// + +eq('txt', mime.extension(mime.types.text)); +eq('html', mime.extension(mime.types.htm)); +eq('bin', mime.extension('application/octet-stream')); +eq('bin', mime.extension('application/octet-stream ')); +eq('html', mime.extension(' text/html; charset=UTF-8')); +eq('html', mime.extension('text/html; charset=UTF-8 ')); +eq('html', mime.extension('text/html; charset=UTF-8')); +eq('html', mime.extension('text/html ; charset=UTF-8')); +eq('html', mime.extension('text/html;charset=UTF-8')); +eq('html', mime.extension('text/Html;charset=UTF-8')); +eq(undefined, mime.extension('constructor')); + +// +// Test node types +// + +eq('application/octet-stream', mime.lookup('file.buffer')); +eq('audio/mp4', mime.lookup('file.m4a')); + +// +// Test charsets +// + +eq('UTF-8', mime.charsets.lookup('text/plain')); +eq(undefined, mime.charsets.lookup(mime.types.js)); +eq('fallback', mime.charsets.lookup('application/octet-stream', 
'fallback')); + +console.log('\nOK'); diff --git a/src/node_modules/caseless/LICENSE b/src/node_modules/caseless/LICENSE new file mode 100644 index 0000000..61789f4 --- /dev/null +++ b/src/node_modules/caseless/LICENSE @@ -0,0 +1,28 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative 
Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/src/node_modules/caseless/README.md b/src/node_modules/caseless/README.md new file mode 100644 index 0000000..e5077a2 --- /dev/null +++ b/src/node_modules/caseless/README.md @@ -0,0 +1,45 @@ +## Caseless -- wrap an object to set and get property with caseless semantics but also preserve caseing. + +This library is incredibly useful when working with HTTP headers. It allows you to get/set/check for headers in a caseless manner while also preserving the caseing of headers the first time they are set. + +## Usage + +```javascript +var headers = {} + , c = caseless(headers) + ; +c.set('a-Header', 'asdf') +c.get('a-header') === 'asdf' +``` + +## has(key) + +Has takes a name and if it finds a matching header will return that header name with the preserved caseing it was set with. + +```javascript +c.has('a-header') === 'a-Header' +``` + +## set(key, value[, clobber=true]) + +Set is fairly straight forward except that if the header exists and clobber is disabled it will add `','+value` to the existing header. + +```javascript +c.set('a-Header', 'fdas') +c.set('a-HEADER', 'more', false) +c.get('a-header') === 'fdsa,more' +``` + +## swap(key) + +Swaps the casing of a header with the new one that is passed in. 
+ +```javascript +var headers = {} + , c = caseless(headers) + ; +c.set('a-Header', 'fdas') +c.swap('a-HEADER') +c.has('a-header') === 'a-HEADER' +headers === {'a-HEADER': 'fdas'} +``` diff --git a/src/node_modules/caseless/index.js b/src/node_modules/caseless/index.js new file mode 100644 index 0000000..b194734 --- /dev/null +++ b/src/node_modules/caseless/index.js @@ -0,0 +1,67 @@ +function Caseless (dict) { + this.dict = dict || {} +} +Caseless.prototype.set = function (name, value, clobber) { + if (typeof name === 'object') { + for (var i in name) { + this.set(i, name[i], value) + } + } else { + if (typeof clobber === 'undefined') clobber = true + var has = this.has(name) + + if (!clobber && has) this.dict[has] = this.dict[has] + ',' + value + else this.dict[has || name] = value + return has + } +} +Caseless.prototype.has = function (name) { + var keys = Object.keys(this.dict) + , name = name.toLowerCase() + ; + for (var i=0;i + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/src/node_modules/combined-stream/Readme.md b/src/node_modules/combined-stream/Readme.md new file mode 100644 index 0000000..9e367b5 --- /dev/null +++ b/src/node_modules/combined-stream/Readme.md @@ -0,0 +1,138 @@ +# combined-stream + +A stream that emits multiple other streams one after another. + +**NB** Currently `combined-stream` works with streams version 1 only. There is ongoing effort to switch this library to streams version 2. Any help is welcome. :) Meanwhile you can explore other libraries that provide streams2 support with more or less compatibility with `combined-stream`. + +- [combined-stream2](https://www.npmjs.com/package/combined-stream2): A drop-in streams2-compatible replacement for the combined-stream module. + +- [multistream](https://www.npmjs.com/package/multistream): A stream that emits multiple other streams one after another. + +## Installation + +``` bash +npm install combined-stream +``` + +## Usage + +Here is a simple example that shows how you can use combined-stream to combine +two files into one: + +``` javascript +var CombinedStream = require('combined-stream'); +var fs = require('fs'); + +var combinedStream = CombinedStream.create(); +combinedStream.append(fs.createReadStream('file1.txt')); +combinedStream.append(fs.createReadStream('file2.txt')); + +combinedStream.pipe(fs.createWriteStream('combined.txt')); +``` + +While the example above works great, it will pause all source streams until +they are needed. 
If you don't want that to happen, you can set `pauseStreams` +to `false`: + +``` javascript +var CombinedStream = require('combined-stream'); +var fs = require('fs'); + +var combinedStream = CombinedStream.create({pauseStreams: false}); +combinedStream.append(fs.createReadStream('file1.txt')); +combinedStream.append(fs.createReadStream('file2.txt')); + +combinedStream.pipe(fs.createWriteStream('combined.txt')); +``` + +However, what if you don't have all the source streams yet, or you don't want +to allocate the resources (file descriptors, memory, etc.) for them right away? +Well, in that case you can simply provide a callback that supplies the stream +by calling a `next()` function: + +``` javascript +var CombinedStream = require('combined-stream'); +var fs = require('fs'); + +var combinedStream = CombinedStream.create(); +combinedStream.append(function(next) { + next(fs.createReadStream('file1.txt')); +}); +combinedStream.append(function(next) { + next(fs.createReadStream('file2.txt')); +}); + +combinedStream.pipe(fs.createWriteStream('combined.txt')); +``` + +## API + +### CombinedStream.create([options]) + +Returns a new combined stream object. Available options are: + +* `maxDataSize` +* `pauseStreams` + +The effect of those options is described below. + +### combinedStream.pauseStreams = `true` + +Whether to apply back pressure to the underlaying streams. If set to `false`, +the underlaying streams will never be paused. If set to `true`, the +underlaying streams will be paused right after being appended, as well as when +`delayedStream.pipe()` wants to throttle. + +### combinedStream.maxDataSize = `2 * 1024 * 1024` + +The maximum amount of bytes (or characters) to buffer for all source streams. +If this value is exceeded, `combinedStream` emits an `'error'` event. + +### combinedStream.dataSize = `0` + +The amount of bytes (or characters) currently buffered by `combinedStream`. 
+ +### combinedStream.append(stream) + +Appends the given `stream` to the combinedStream object. If `pauseStreams` is +set to `true, this stream will also be paused right away. + +`streams` can also be a function that takes one parameter called `next`. `next` +is a function that must be invoked in order to provide the `next` stream, see +example above. + +Regardless of how the `stream` is appended, combined-stream always attaches an +`'error'` listener to it, so you don't have to do that manually. + +Special case: `stream` can also be a String or Buffer. + +### combinedStream.write(data) + +You should not call this, `combinedStream` takes care of piping the appended +streams into itself for you. + +### combinedStream.resume() + +Causes `combinedStream` to start drain the streams it manages. The function is +idempotent, and also emits a `'resume'` event each time which usually goes to +the stream that is currently being drained. + +### combinedStream.pause(); + +If `combinedStream.pauseStreams` is set to `false`, this does nothing. +Otherwise a `'pause'` event is emitted, this goes to the stream that is +currently being drained, so you can use it to apply back pressure. + +### combinedStream.end(); + +Sets `combinedStream.writable` to false, emits an `'end'` event, and removes +all streams from the queue. + +### combinedStream.destroy(); + +Same as `combinedStream.end()`, except it emits a `'close'` event instead of +`'end'`. + +## License + +combined-stream is licensed under the MIT license. 
diff --git a/src/node_modules/combined-stream/lib/combined_stream.js b/src/node_modules/combined-stream/lib/combined_stream.js new file mode 100644 index 0000000..125f097 --- /dev/null +++ b/src/node_modules/combined-stream/lib/combined_stream.js @@ -0,0 +1,208 @@ +var util = require('util'); +var Stream = require('stream').Stream; +var DelayedStream = require('delayed-stream'); + +module.exports = CombinedStream; +function CombinedStream() { + this.writable = false; + this.readable = true; + this.dataSize = 0; + this.maxDataSize = 2 * 1024 * 1024; + this.pauseStreams = true; + + this._released = false; + this._streams = []; + this._currentStream = null; + this._insideLoop = false; + this._pendingNext = false; +} +util.inherits(CombinedStream, Stream); + +CombinedStream.create = function(options) { + var combinedStream = new this(); + + options = options || {}; + for (var option in options) { + combinedStream[option] = options[option]; + } + + return combinedStream; +}; + +CombinedStream.isStreamLike = function(stream) { + return (typeof stream !== 'function') + && (typeof stream !== 'string') + && (typeof stream !== 'boolean') + && (typeof stream !== 'number') + && (!Buffer.isBuffer(stream)); +}; + +CombinedStream.prototype.append = function(stream) { + var isStreamLike = CombinedStream.isStreamLike(stream); + + if (isStreamLike) { + if (!(stream instanceof DelayedStream)) { + var newStream = DelayedStream.create(stream, { + maxDataSize: Infinity, + pauseStream: this.pauseStreams, + }); + stream.on('data', this._checkDataSize.bind(this)); + stream = newStream; + } + + this._handleErrors(stream); + + if (this.pauseStreams) { + stream.pause(); + } + } + + this._streams.push(stream); + return this; +}; + +CombinedStream.prototype.pipe = function(dest, options) { + Stream.prototype.pipe.call(this, dest, options); + this.resume(); + return dest; +}; + +CombinedStream.prototype._getNext = function() { + this._currentStream = null; + + if (this._insideLoop) { + 
this._pendingNext = true; + return; // defer call + } + + this._insideLoop = true; + try { + do { + this._pendingNext = false; + this._realGetNext(); + } while (this._pendingNext); + } finally { + this._insideLoop = false; + } +}; + +CombinedStream.prototype._realGetNext = function() { + var stream = this._streams.shift(); + + + if (typeof stream == 'undefined') { + this.end(); + return; + } + + if (typeof stream !== 'function') { + this._pipeNext(stream); + return; + } + + var getStream = stream; + getStream(function(stream) { + var isStreamLike = CombinedStream.isStreamLike(stream); + if (isStreamLike) { + stream.on('data', this._checkDataSize.bind(this)); + this._handleErrors(stream); + } + + this._pipeNext(stream); + }.bind(this)); +}; + +CombinedStream.prototype._pipeNext = function(stream) { + this._currentStream = stream; + + var isStreamLike = CombinedStream.isStreamLike(stream); + if (isStreamLike) { + stream.on('end', this._getNext.bind(this)); + stream.pipe(this, {end: false}); + return; + } + + var value = stream; + this.write(value); + this._getNext(); +}; + +CombinedStream.prototype._handleErrors = function(stream) { + var self = this; + stream.on('error', function(err) { + self._emitError(err); + }); +}; + +CombinedStream.prototype.write = function(data) { + this.emit('data', data); +}; + +CombinedStream.prototype.pause = function() { + if (!this.pauseStreams) { + return; + } + + if(this.pauseStreams && this._currentStream && typeof(this._currentStream.pause) == 'function') this._currentStream.pause(); + this.emit('pause'); +}; + +CombinedStream.prototype.resume = function() { + if (!this._released) { + this._released = true; + this.writable = true; + this._getNext(); + } + + if(this.pauseStreams && this._currentStream && typeof(this._currentStream.resume) == 'function') this._currentStream.resume(); + this.emit('resume'); +}; + +CombinedStream.prototype.end = function() { + this._reset(); + this.emit('end'); +}; + +CombinedStream.prototype.destroy = 
function() { + this._reset(); + this.emit('close'); +}; + +CombinedStream.prototype._reset = function() { + this.writable = false; + this._streams = []; + this._currentStream = null; +}; + +CombinedStream.prototype._checkDataSize = function() { + this._updateDataSize(); + if (this.dataSize <= this.maxDataSize) { + return; + } + + var message = + 'DelayedStream#maxDataSize of ' + this.maxDataSize + ' bytes exceeded.'; + this._emitError(new Error(message)); +}; + +CombinedStream.prototype._updateDataSize = function() { + this.dataSize = 0; + + var self = this; + this._streams.forEach(function(stream) { + if (!stream.dataSize) { + return; + } + + self.dataSize += stream.dataSize; + }); + + if (this._currentStream && this._currentStream.dataSize) { + this.dataSize += this._currentStream.dataSize; + } +}; + +CombinedStream.prototype._emitError = function(err) { + this._reset(); + this.emit('error', err); +}; diff --git a/src/node_modules/combined-stream/package.json b/src/node_modules/combined-stream/package.json new file mode 100644 index 0000000..d757071 --- /dev/null +++ b/src/node_modules/combined-stream/package.json @@ -0,0 +1,61 @@ +{ + "_args": [ + [ + "combined-stream@1.0.8", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "combined-stream@1.0.8", + "_id": "combined-stream@1.0.8", + "_inBundle": false, + "_integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "_location": "/combined-stream", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "combined-stream@1.0.8", + "name": "combined-stream", + "escapedName": "combined-stream", + "rawSpec": "1.0.8", + "saveSpec": null, + "fetchSpec": "1.0.8" + }, + "_requiredBy": [ + "/form-data", + "/request" + ], + "_resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "_spec": "1.0.8", + "_where": 
"/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Felix Geisendörfer", + "email": "felix@debuggable.com", + "url": "http://debuggable.com/" + }, + "bugs": { + "url": "https://github.com/felixge/node-combined-stream/issues" + }, + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "description": "A stream that emits multiple other streams one after another.", + "devDependencies": { + "far": "~0.0.7" + }, + "engines": { + "node": ">= 0.8" + }, + "homepage": "https://github.com/felixge/node-combined-stream", + "license": "MIT", + "main": "./lib/combined_stream", + "name": "combined-stream", + "repository": { + "type": "git", + "url": "git://github.com/felixge/node-combined-stream.git" + }, + "scripts": { + "test": "node test/run.js" + }, + "version": "1.0.8" +} diff --git a/src/node_modules/combined-stream/yarn.lock b/src/node_modules/combined-stream/yarn.lock new file mode 100644 index 0000000..7edf418 --- /dev/null +++ b/src/node_modules/combined-stream/yarn.lock @@ -0,0 +1,17 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +far@~0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/far/-/far-0.0.7.tgz#01c1fd362bcd26ce9cf161af3938aa34619f79a7" + dependencies: + oop "0.0.3" + +oop@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/oop/-/oop-0.0.3.tgz#70fa405a5650891a194fdc82ca68dad6dabf4401" diff --git a/src/node_modules/core-util-is/LICENSE b/src/node_modules/core-util-is/LICENSE new file mode 100644 index 0000000..d8d7f94 --- /dev/null +++ b/src/node_modules/core-util-is/LICENSE @@ -0,0 +1,19 @@ +Copyright Node.js contributors. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/src/node_modules/core-util-is/README.md b/src/node_modules/core-util-is/README.md new file mode 100644 index 0000000..5a76b41 --- /dev/null +++ b/src/node_modules/core-util-is/README.md @@ -0,0 +1,3 @@ +# core-util-is + +The `util.is*` functions introduced in Node v0.12. diff --git a/src/node_modules/core-util-is/float.patch b/src/node_modules/core-util-is/float.patch new file mode 100644 index 0000000..a06d5c0 --- /dev/null +++ b/src/node_modules/core-util-is/float.patch @@ -0,0 +1,604 @@ +diff --git a/lib/util.js b/lib/util.js +index a03e874..9074e8e 100644 +--- a/lib/util.js ++++ b/lib/util.js +@@ -19,430 +19,6 @@ + // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + // USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-var formatRegExp = /%[sdj%]/g; +-exports.format = function(f) { +- if (!isString(f)) { +- var objects = []; +- for (var i = 0; i < arguments.length; i++) { +- objects.push(inspect(arguments[i])); +- } +- return objects.join(' '); +- } +- +- var i = 1; +- var args = arguments; +- var len = args.length; +- var str = String(f).replace(formatRegExp, function(x) { +- if (x === '%%') return '%'; +- if (i >= len) return x; +- switch (x) { +- case '%s': return String(args[i++]); +- case '%d': return Number(args[i++]); +- case '%j': +- try { +- return JSON.stringify(args[i++]); +- } catch (_) { +- return '[Circular]'; +- } +- default: +- return x; +- } +- }); +- for (var x = args[i]; i < len; x = args[++i]) { +- if (isNull(x) || !isObject(x)) { +- str += ' ' + x; +- } else { +- str += ' ' + inspect(x); +- } +- } +- return str; +-}; +- +- +-// Mark that a method should not be used. +-// Returns a modified function which warns once by default. +-// If --no-deprecation is set, then it is a no-op. +-exports.deprecate = function(fn, msg) { +- // Allow for deprecating things in the process of starting up. 
+- if (isUndefined(global.process)) { +- return function() { +- return exports.deprecate(fn, msg).apply(this, arguments); +- }; +- } +- +- if (process.noDeprecation === true) { +- return fn; +- } +- +- var warned = false; +- function deprecated() { +- if (!warned) { +- if (process.throwDeprecation) { +- throw new Error(msg); +- } else if (process.traceDeprecation) { +- console.trace(msg); +- } else { +- console.error(msg); +- } +- warned = true; +- } +- return fn.apply(this, arguments); +- } +- +- return deprecated; +-}; +- +- +-var debugs = {}; +-var debugEnviron; +-exports.debuglog = function(set) { +- if (isUndefined(debugEnviron)) +- debugEnviron = process.env.NODE_DEBUG || ''; +- set = set.toUpperCase(); +- if (!debugs[set]) { +- if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) { +- var pid = process.pid; +- debugs[set] = function() { +- var msg = exports.format.apply(exports, arguments); +- console.error('%s %d: %s', set, pid, msg); +- }; +- } else { +- debugs[set] = function() {}; +- } +- } +- return debugs[set]; +-}; +- +- +-/** +- * Echos the value of a value. Trys to print the value out +- * in the best way possible given the different types. +- * +- * @param {Object} obj The object to print out. +- * @param {Object} opts Optional options object that alters the output. +- */ +-/* legacy: obj, showHidden, depth, colors*/ +-function inspect(obj, opts) { +- // default options +- var ctx = { +- seen: [], +- stylize: stylizeNoColor +- }; +- // legacy... +- if (arguments.length >= 3) ctx.depth = arguments[2]; +- if (arguments.length >= 4) ctx.colors = arguments[3]; +- if (isBoolean(opts)) { +- // legacy... 
+- ctx.showHidden = opts; +- } else if (opts) { +- // got an "options" object +- exports._extend(ctx, opts); +- } +- // set default options +- if (isUndefined(ctx.showHidden)) ctx.showHidden = false; +- if (isUndefined(ctx.depth)) ctx.depth = 2; +- if (isUndefined(ctx.colors)) ctx.colors = false; +- if (isUndefined(ctx.customInspect)) ctx.customInspect = true; +- if (ctx.colors) ctx.stylize = stylizeWithColor; +- return formatValue(ctx, obj, ctx.depth); +-} +-exports.inspect = inspect; +- +- +-// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics +-inspect.colors = { +- 'bold' : [1, 22], +- 'italic' : [3, 23], +- 'underline' : [4, 24], +- 'inverse' : [7, 27], +- 'white' : [37, 39], +- 'grey' : [90, 39], +- 'black' : [30, 39], +- 'blue' : [34, 39], +- 'cyan' : [36, 39], +- 'green' : [32, 39], +- 'magenta' : [35, 39], +- 'red' : [31, 39], +- 'yellow' : [33, 39] +-}; +- +-// Don't use 'blue' not visible on cmd.exe +-inspect.styles = { +- 'special': 'cyan', +- 'number': 'yellow', +- 'boolean': 'yellow', +- 'undefined': 'grey', +- 'null': 'bold', +- 'string': 'green', +- 'date': 'magenta', +- // "name": intentionally not styling +- 'regexp': 'red' +-}; +- +- +-function stylizeWithColor(str, styleType) { +- var style = inspect.styles[styleType]; +- +- if (style) { +- return '\u001b[' + inspect.colors[style][0] + 'm' + str + +- '\u001b[' + inspect.colors[style][1] + 'm'; +- } else { +- return str; +- } +-} +- +- +-function stylizeNoColor(str, styleType) { +- return str; +-} +- +- +-function arrayToHash(array) { +- var hash = {}; +- +- array.forEach(function(val, idx) { +- hash[val] = true; +- }); +- +- return hash; +-} +- +- +-function formatValue(ctx, value, recurseTimes) { +- // Provide a hook for user-specified inspect functions. 
+- // Check that value is an object with an inspect function on it +- if (ctx.customInspect && +- value && +- isFunction(value.inspect) && +- // Filter out the util module, it's inspect function is special +- value.inspect !== exports.inspect && +- // Also filter out any prototype objects using the circular check. +- !(value.constructor && value.constructor.prototype === value)) { +- var ret = value.inspect(recurseTimes, ctx); +- if (!isString(ret)) { +- ret = formatValue(ctx, ret, recurseTimes); +- } +- return ret; +- } +- +- // Primitive types cannot have properties +- var primitive = formatPrimitive(ctx, value); +- if (primitive) { +- return primitive; +- } +- +- // Look up the keys of the object. +- var keys = Object.keys(value); +- var visibleKeys = arrayToHash(keys); +- +- if (ctx.showHidden) { +- keys = Object.getOwnPropertyNames(value); +- } +- +- // Some type of object without properties can be shortcutted. +- if (keys.length === 0) { +- if (isFunction(value)) { +- var name = value.name ? ': ' + value.name : ''; +- return ctx.stylize('[Function' + name + ']', 'special'); +- } +- if (isRegExp(value)) { +- return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); +- } +- if (isDate(value)) { +- return ctx.stylize(Date.prototype.toString.call(value), 'date'); +- } +- if (isError(value)) { +- return formatError(value); +- } +- } +- +- var base = '', array = false, braces = ['{', '}']; +- +- // Make Array say that they are Array +- if (isArray(value)) { +- array = true; +- braces = ['[', ']']; +- } +- +- // Make functions say that they are functions +- if (isFunction(value)) { +- var n = value.name ? 
': ' + value.name : ''; +- base = ' [Function' + n + ']'; +- } +- +- // Make RegExps say that they are RegExps +- if (isRegExp(value)) { +- base = ' ' + RegExp.prototype.toString.call(value); +- } +- +- // Make dates with properties first say the date +- if (isDate(value)) { +- base = ' ' + Date.prototype.toUTCString.call(value); +- } +- +- // Make error with message first say the error +- if (isError(value)) { +- base = ' ' + formatError(value); +- } +- +- if (keys.length === 0 && (!array || value.length == 0)) { +- return braces[0] + base + braces[1]; +- } +- +- if (recurseTimes < 0) { +- if (isRegExp(value)) { +- return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp'); +- } else { +- return ctx.stylize('[Object]', 'special'); +- } +- } +- +- ctx.seen.push(value); +- +- var output; +- if (array) { +- output = formatArray(ctx, value, recurseTimes, visibleKeys, keys); +- } else { +- output = keys.map(function(key) { +- return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array); +- }); +- } +- +- ctx.seen.pop(); +- +- return reduceToSingleString(output, base, braces); +-} +- +- +-function formatPrimitive(ctx, value) { +- if (isUndefined(value)) +- return ctx.stylize('undefined', 'undefined'); +- if (isString(value)) { +- var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '') +- .replace(/'/g, "\\'") +- .replace(/\\"/g, '"') + '\''; +- return ctx.stylize(simple, 'string'); +- } +- if (isNumber(value)) { +- // Format -0 as '-0'. Strict equality won't distinguish 0 from -0, +- // so instead we use the fact that 1 / -0 < 0 whereas 1 / 0 > 0 . +- if (value === 0 && 1 / value < 0) +- return ctx.stylize('-0', 'number'); +- return ctx.stylize('' + value, 'number'); +- } +- if (isBoolean(value)) +- return ctx.stylize('' + value, 'boolean'); +- // For some reason typeof null is "object", so special case here. 
+- if (isNull(value)) +- return ctx.stylize('null', 'null'); +-} +- +- +-function formatError(value) { +- return '[' + Error.prototype.toString.call(value) + ']'; +-} +- +- +-function formatArray(ctx, value, recurseTimes, visibleKeys, keys) { +- var output = []; +- for (var i = 0, l = value.length; i < l; ++i) { +- if (hasOwnProperty(value, String(i))) { +- output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, +- String(i), true)); +- } else { +- output.push(''); +- } +- } +- keys.forEach(function(key) { +- if (!key.match(/^\d+$/)) { +- output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, +- key, true)); +- } +- }); +- return output; +-} +- +- +-function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) { +- var name, str, desc; +- desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] }; +- if (desc.get) { +- if (desc.set) { +- str = ctx.stylize('[Getter/Setter]', 'special'); +- } else { +- str = ctx.stylize('[Getter]', 'special'); +- } +- } else { +- if (desc.set) { +- str = ctx.stylize('[Setter]', 'special'); +- } +- } +- if (!hasOwnProperty(visibleKeys, key)) { +- name = '[' + key + ']'; +- } +- if (!str) { +- if (ctx.seen.indexOf(desc.value) < 0) { +- if (isNull(recurseTimes)) { +- str = formatValue(ctx, desc.value, null); +- } else { +- str = formatValue(ctx, desc.value, recurseTimes - 1); +- } +- if (str.indexOf('\n') > -1) { +- if (array) { +- str = str.split('\n').map(function(line) { +- return ' ' + line; +- }).join('\n').substr(2); +- } else { +- str = '\n' + str.split('\n').map(function(line) { +- return ' ' + line; +- }).join('\n'); +- } +- } +- } else { +- str = ctx.stylize('[Circular]', 'special'); +- } +- } +- if (isUndefined(name)) { +- if (array && key.match(/^\d+$/)) { +- return str; +- } +- name = JSON.stringify('' + key); +- if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) { +- name = name.substr(1, name.length - 2); +- name = ctx.stylize(name, 'name'); +- } else { +- name = 
name.replace(/'/g, "\\'") +- .replace(/\\"/g, '"') +- .replace(/(^"|"$)/g, "'"); +- name = ctx.stylize(name, 'string'); +- } +- } +- +- return name + ': ' + str; +-} +- +- +-function reduceToSingleString(output, base, braces) { +- var numLinesEst = 0; +- var length = output.reduce(function(prev, cur) { +- numLinesEst++; +- if (cur.indexOf('\n') >= 0) numLinesEst++; +- return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1; +- }, 0); +- +- if (length > 60) { +- return braces[0] + +- (base === '' ? '' : base + '\n ') + +- ' ' + +- output.join(',\n ') + +- ' ' + +- braces[1]; +- } +- +- return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1]; +-} +- +- + // NOTE: These type checking functions intentionally don't use `instanceof` + // because it is fragile and can be easily faked with `Object.create()`. + function isArray(ar) { +@@ -522,166 +98,10 @@ function isPrimitive(arg) { + exports.isPrimitive = isPrimitive; + + function isBuffer(arg) { +- return arg instanceof Buffer; ++ return Buffer.isBuffer(arg); + } + exports.isBuffer = isBuffer; + + function objectToString(o) { + return Object.prototype.toString.call(o); +-} +- +- +-function pad(n) { +- return n < 10 ? '0' + n.toString(10) : n.toString(10); +-} +- +- +-var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', +- 'Oct', 'Nov', 'Dec']; +- +-// 26 Feb 16:19:34 +-function timestamp() { +- var d = new Date(); +- var time = [pad(d.getHours()), +- pad(d.getMinutes()), +- pad(d.getSeconds())].join(':'); +- return [d.getDate(), months[d.getMonth()], time].join(' '); +-} +- +- +-// log is just a thin wrapper to console.log that prepends a timestamp +-exports.log = function() { +- console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments)); +-}; +- +- +-/** +- * Inherit the prototype methods from one constructor into another. +- * +- * The Function.prototype.inherits from lang.js rewritten as a standalone +- * function (not on Function.prototype). 
NOTE: If this file is to be loaded +- * during bootstrapping this function needs to be rewritten using some native +- * functions as prototype setup using normal JavaScript does not work as +- * expected during bootstrapping (see mirror.js in r114903). +- * +- * @param {function} ctor Constructor function which needs to inherit the +- * prototype. +- * @param {function} superCtor Constructor function to inherit prototype from. +- */ +-exports.inherits = function(ctor, superCtor) { +- ctor.super_ = superCtor; +- ctor.prototype = Object.create(superCtor.prototype, { +- constructor: { +- value: ctor, +- enumerable: false, +- writable: true, +- configurable: true +- } +- }); +-}; +- +-exports._extend = function(origin, add) { +- // Don't do anything if add isn't an object +- if (!add || !isObject(add)) return origin; +- +- var keys = Object.keys(add); +- var i = keys.length; +- while (i--) { +- origin[keys[i]] = add[keys[i]]; +- } +- return origin; +-}; +- +-function hasOwnProperty(obj, prop) { +- return Object.prototype.hasOwnProperty.call(obj, prop); +-} +- +- +-// Deprecated old stuff. 
+- +-exports.p = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- console.error(exports.inspect(arguments[i])); +- } +-}, 'util.p: Use console.error() instead'); +- +- +-exports.exec = exports.deprecate(function() { +- return require('child_process').exec.apply(this, arguments); +-}, 'util.exec is now called `child_process.exec`.'); +- +- +-exports.print = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stdout.write(String(arguments[i])); +- } +-}, 'util.print: Use console.log instead'); +- +- +-exports.puts = exports.deprecate(function() { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stdout.write(arguments[i] + '\n'); +- } +-}, 'util.puts: Use console.log instead'); +- +- +-exports.debug = exports.deprecate(function(x) { +- process.stderr.write('DEBUG: ' + x + '\n'); +-}, 'util.debug: Use console.error instead'); +- +- +-exports.error = exports.deprecate(function(x) { +- for (var i = 0, len = arguments.length; i < len; ++i) { +- process.stderr.write(arguments[i] + '\n'); +- } +-}, 'util.error: Use console.error instead'); +- +- +-exports.pump = exports.deprecate(function(readStream, writeStream, callback) { +- var callbackCalled = false; +- +- function call(a, b, c) { +- if (callback && !callbackCalled) { +- callback(a, b, c); +- callbackCalled = true; +- } +- } +- +- readStream.addListener('data', function(chunk) { +- if (writeStream.write(chunk) === false) readStream.pause(); +- }); +- +- writeStream.addListener('drain', function() { +- readStream.resume(); +- }); +- +- readStream.addListener('end', function() { +- writeStream.end(); +- }); +- +- readStream.addListener('close', function() { +- call(); +- }); +- +- readStream.addListener('error', function(err) { +- writeStream.end(); +- call(err); +- }); +- +- writeStream.addListener('error', function(err) { +- readStream.destroy(); +- call(err); +- }); +-}, 'util.pump(): Use 
readableStream.pipe() instead'); +- +- +-var uv; +-exports._errnoException = function(err, syscall) { +- if (isUndefined(uv)) uv = process.binding('uv'); +- var errname = uv.errname(err); +- var e = new Error(syscall + ' ' + errname); +- e.code = errname; +- e.errno = errname; +- e.syscall = syscall; +- return e; +-}; ++} \ No newline at end of file diff --git a/src/node_modules/core-util-is/lib/util.js b/src/node_modules/core-util-is/lib/util.js new file mode 100644 index 0000000..ff4c851 --- /dev/null +++ b/src/node_modules/core-util-is/lib/util.js @@ -0,0 +1,107 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. 
+ +function isArray(arg) { + if (Array.isArray) { + return Array.isArray(arg); + } + return objectToString(arg) === '[object Array]'; +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +exports.isBuffer = Buffer.isBuffer; + +function objectToString(o) { + return Object.prototype.toString.call(o); +} diff --git a/src/node_modules/core-util-is/package.json b/src/node_modules/core-util-is/package.json new file mode 100644 index 0000000..cf45131 --- /dev/null +++ b/src/node_modules/core-util-is/package.json @@ -0,0 +1,66 @@ +{ + "_args": 
[ + [ + "core-util-is@1.0.2", + "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src" + ] + ], + "_from": "core-util-is@1.0.2", + "_id": "core-util-is@1.0.2", + "_inBundle": false, + "_integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "_location": "/core-util-is", + "_phantomChildren": {}, + "_requested": { + "type": "version", + "registry": true, + "raw": "core-util-is@1.0.2", + "name": "core-util-is", + "escapedName": "core-util-is", + "rawSpec": "1.0.2", + "saveSpec": null, + "fetchSpec": "1.0.2" + }, + "_requiredBy": [ + "/readable-stream", + "/verror" + ], + "_resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "_spec": "1.0.2", + "_where": "/Users/swinkler/Desktop/manning/manning-code/chapter5/creative/terraform-azure-ballroom/src", + "author": { + "name": "Isaac Z. Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "bugs": { + "url": "https://github.com/isaacs/core-util-is/issues" + }, + "description": "The `util.is*` functions introduced in Node v0.12.", + "devDependencies": { + "tap": "^2.3.0" + }, + "homepage": "https://github.com/isaacs/core-util-is#readme", + "keywords": [ + "util", + "isBuffer", + "isArray", + "isNumber", + "isString", + "isRegExp", + "isThis", + "isThat", + "polyfill" + ], + "license": "MIT", + "main": "lib/util.js", + "name": "core-util-is", + "repository": { + "type": "git", + "url": "git://github.com/isaacs/core-util-is.git" + }, + "scripts": { + "test": "tap test.js" + }, + "version": "1.0.2" +} diff --git a/src/node_modules/core-util-is/test.js b/src/node_modules/core-util-is/test.js new file mode 100644 index 0000000..1a490c6 --- /dev/null +++ b/src/node_modules/core-util-is/test.js @@ -0,0 +1,68 @@ +var assert = require('tap'); + +var t = require('./lib/util'); + +assert.equal(t.isArray([]), true); +assert.equal(t.isArray({}), false); + +assert.equal(t.isBoolean(null), false); +assert.equal(t.isBoolean(true), true); 
+assert.equal(t.isBoolean(false), true); + +assert.equal(t.isNull(null), true); +assert.equal(t.isNull(undefined), false); +assert.equal(t.isNull(false), false); +assert.equal(t.isNull(), false); + +assert.equal(t.isNullOrUndefined(null), true); +assert.equal(t.isNullOrUndefined(undefined), true); +assert.equal(t.isNullOrUndefined(false), false); +assert.equal(t.isNullOrUndefined(), true); + +assert.equal(t.isNumber(null), false); +assert.equal(t.isNumber('1'), false); +assert.equal(t.isNumber(1), true); + +assert.equal(t.isString(null), false); +assert.equal(t.isString('1'), true); +assert.equal(t.isString(1), false); + +assert.equal(t.isSymbol(null), false); +assert.equal(t.isSymbol('1'), false); +assert.equal(t.isSymbol(1), false); +assert.equal(t.isSymbol(Symbol()), true); + +assert.equal(t.isUndefined(null), false); +assert.equal(t.isUndefined(undefined), true); +assert.equal(t.isUndefined(false), false); +assert.equal(t.isUndefined(), true); + +assert.equal(t.isRegExp(null), false); +assert.equal(t.isRegExp('1'), false); +assert.equal(t.isRegExp(new RegExp()), true); + +assert.equal(t.isObject({}), true); +assert.equal(t.isObject([]), true); +assert.equal(t.isObject(new RegExp()), true); +assert.equal(t.isObject(new Date()), true); + +assert.equal(t.isDate(null), false); +assert.equal(t.isDate('1'), false); +assert.equal(t.isDate(new Date()), true); + +assert.equal(t.isError(null), false); +assert.equal(t.isError({ err: true }), false); +assert.equal(t.isError(new Error()), true); + +assert.equal(t.isFunction(null), false); +assert.equal(t.isFunction({ }), false); +assert.equal(t.isFunction(function() {}), true); + +assert.equal(t.isPrimitive(null), true); +assert.equal(t.isPrimitive(''), true); +assert.equal(t.isPrimitive(0), true); +assert.equal(t.isPrimitive(new Date()), false); + +assert.equal(t.isBuffer(null), false); +assert.equal(t.isBuffer({}), false); +assert.equal(t.isBuffer(new Buffer(0)), true); diff --git a/src/node_modules/dashdash/CHANGES.md 
b/src/node_modules/dashdash/CHANGES.md new file mode 100644 index 0000000..d7c8f4e --- /dev/null +++ b/src/node_modules/dashdash/CHANGES.md @@ -0,0 +1,364 @@ +# node-dashdash changelog + +## not yet released + +(nothing yet) + +## 1.14.1 + +- [issue #30] Change the output used by dashdash's Bash completion support to + indicate "there are no completions for this argument" to cope with different + sorting rules on different Bash/platforms. For example: + + $ triton -v -p test2 package get # before + ##-no -tritonpackage- completions-## + + $ triton -v -p test2 package get # after + ##-no-completion- -results-## + +## 1.14.0 + +- New `synopsisFromOpt(