From 908742577ab25c8deb265df324665c8fb22a552a Mon Sep 17 00:00:00 2001 From: Umesh Timalsina Date: Wed, 8 Sep 2021 13:15:51 -0500 Subject: [PATCH] Update to tensorflow==2.6.0, spektral==1.0.8 --- bin/parse-layers | 3 +- environment.server.yml | 4 +- environment.worker.yml | 4 +- package-lock.json | 8219 ++++++++++++++--- .../CreateKerasMeta/schemas/activations.json | 65 +- .../CreateKerasMeta/schemas/constraints.json | 16 +- .../CreateKerasMeta/schemas/initializers.json | 68 +- .../CreateKerasMeta/schemas/layers.json | 3691 +++++--- .../CreateKerasMeta/schemas/regularizers.json | 6 +- src/seeds/keras/keras.webgmex | Bin 888310 -> 1056730 bytes 10 files changed, 9713 insertions(+), 2363 deletions(-) diff --git a/bin/parse-layers b/bin/parse-layers index e80dde2..360908f 100755 --- a/bin/parse-layers +++ b/bin/parse-layers @@ -99,7 +99,6 @@ class LayerParser: def parse_layers(self): layers = self._parse_class_module(module_name=keras.layers) - layers.extend(self._parse_class_module(module_name=tf.python.keras.layers.cudnn_recurrent)) layers.extend(self._parse_class_module(module_name=spektral.layers)) layers = self._replace_aliases(layers) self.delete_key(layers) @@ -278,7 +277,7 @@ class LayerParser: else: default = None if param.default is param.empty else param.default - if isinstance(default, tf.python.framework.dtypes.DType): + if isinstance(default, tf.dtypes.DType): default = str(default) params_list.append({ diff --git a/environment.server.yml b/environment.server.yml index 926a7dd..ac96b41 100644 --- a/environment.server.yml +++ b/environment.server.yml @@ -3,6 +3,6 @@ dependencies: - python=3.7 - pip=21.0.1 - pip: - - tensorflow==2.3.0 - - spektral==1.0.3 + - tensorflow==2.6.0 + - spektral==1.0.8 - pyzmq==19.0.1 diff --git a/environment.worker.yml b/environment.worker.yml index f7e2317..b3eefa1 100644 --- a/environment.worker.yml +++ b/environment.worker.yml @@ -3,5 +3,5 @@ dependencies: - python=3.7 - pip=21.0.1 - pip: - - tensorflow==2.3.0 - - 
spektral==1.0.3 + - tensorflow==2.6.0 + - spektral==1.0.8 diff --git a/package-lock.json b/package-lock.json index 17f9989..a47bceb 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,30 +1,6789 @@ { "name": "deepforge-keras", "version": "2.2.1", - "lockfileVersion": 1, + "lockfileVersion": 2, "requires": true, + "packages": { + "": { + "name": "deepforge-keras", + "version": "2.2.1", + "dependencies": { + "rimraf": "^2.4.0", + "webgme-autoviz": "^2.2.1", + "webgme-easydag": "github:dfst/webgme-easydag", + "webgme-json-importer": "github:deepforge-dev/webgme-json-importer", + "webgme-simple-nodes": "github:brollb/webgme-simple-nodes", + "zeromq": "^6.0.0-beta.6" + }, + "devDependencies": { + "chai": "^3.0.0", + "mocha": "^5.2.0", + "webgme": "^2.42.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.11.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.11.2.tgz", + "integrity": "sha512-Vuj/+7vLo6l1Vi7uuO+1ngCDNeVmNbTngcJFKCR/oEtz8tKz0CJxZEGmPt9KcIloZhOZ3Zit6xbpXT2MDlS9Vw==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/accepts": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", + "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", + "dependencies": { + "mime-types": "~2.1.24", + "negotiator": "0.6.2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "7.4.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.0.tgz", + "integrity": "sha512-+G7P8jJmCHr+S+cLfQxygbWhXy+8YTVGzAkpEbcLo2mLoL7tij/VG41QSHACSf5QgYRhMZYHuNc6drJaO0Da+w==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-node": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/acorn-node/-/acorn-node-1.8.2.tgz", + "integrity": 
"sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A==", + "dependencies": { + "acorn": "^7.0.0", + "acorn-walk": "^7.0.0", + "xtend": "^4.0.2" + } + }, + "node_modules/acorn-walk": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz", + "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.1.2.tgz", + "integrity": "sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==", + "engines": { + "node": ">= 0.12.0" + } + }, + "node_modules/adm-zip": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.4.11.tgz", + "integrity": "sha512-L8vcjDTCOIJk7wFvmlEUN7AsSb8T+2JrdP7KINBjzr24TJ5Mwj590sLu3BC7zNZowvJWa/JtPmD8eJCzdtDWjA==", + "engines": { + "node": ">=0.3.0" + } + }, + "node_modules/after": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz", + "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8=" + }, + "node_modules/agentkeepalive": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-3.4.1.tgz", + "integrity": "sha512-MPIwsZU9PP9kOrZpyu2042kYA8Fdt/AedQYkYXucHgF9QoD9dXVp0ypuGnHXSR0hTstBxdt85Xkh4JolYfK5wg==", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=" + }, + "node_modules/anymatch": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", + "integrity": "sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", + "dependencies": { + "micromatch": "^2.1.5", + "normalize-path": "^2.0.0" + } + }, + "node_modules/archiver": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/archiver/-/archiver-2.1.1.tgz", + "integrity": "sha1-/2YrSnggFJSj7lRNOjP+dJZQnrw=", + "dependencies": { + "archiver-utils": "^1.3.0", + "async": "^2.0.0", + "buffer-crc32": "^0.2.1", + "glob": "^7.0.0", + "lodash": "^4.8.0", + "readable-stream": "^2.0.0", + "tar-stream": "^1.5.0", + "zip-stream": "^1.2.0" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/archiver-utils": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/archiver-utils/-/archiver-utils-1.3.0.tgz", + "integrity": "sha1-5QtMCccL89aA4y/xt5lOn52JUXQ=", + "dependencies": { + "glob": "^7.0.0", + "graceful-fs": "^4.1.0", + "lazystream": "^1.0.0", + "lodash": "^4.8.0", + "normalize-path": "^2.0.0", + "readable-stream": "^2.0.0" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/argh": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/argh/-/argh-0.1.4.tgz", + "integrity": "sha1-PrTWEpc/xrbcbvM49W91nyrFw6Y=" + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": 
{ + "sprintf-js": "~1.0.2" + } + }, + "node_modules/arr-diff": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", + "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", + "dependencies": { + "arr-flatten": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/arr-union": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", + "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" + }, + "node_modules/array-unique": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", + "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/arraybuffer.slice": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz", + "integrity": "sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog==" + }, + "node_modules/asap": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", + "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" + }, + "node_modules/asn1.js": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", + "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "dependencies": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", 
+ "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/asn1.js/node_modules/bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/assert": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", + "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", + "dependencies": { + "object-assign": "^4.1.1", + "util": "0.10.3" + } + }, + "node_modules/assert/node_modules/inherits": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=" + }, + "node_modules/assert/node_modules/util": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", + "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", + "dependencies": { + "inherits": "2.0.1" + } + }, + "node_modules/assertion-error": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.0.2.tgz", + "integrity": "sha1-E8pRXYYgbaC6xm6DTdOX2HWBCUw=", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/async": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", + "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/async-each": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", + "integrity": 
"sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==" + }, + "node_modules/async-limiter": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", + "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + }, + "node_modules/atob": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "bin": { + "atob": "bin/atob.js" + }, + "engines": { + "node": ">= 4.5.0" + } + }, + "node_modules/aws-sdk": { + "version": "2.260.1", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.260.1.tgz", + "integrity": "sha512-NZ5nPImMQD4ULLPbbpBDt7d9aludsYBttOd4dtlxxy+IANrDn9meQn591xOULwaE9usrtbzWkJJFXop3wpznTQ==", + "dependencies": { + "buffer": "4.9.1", + "events": "1.1.1", + "ieee754": "1.1.8", + "jmespath": "0.15.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.1.0", + "xml2js": "0.4.17" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/aws-sdk/node_modules/buffer": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz", + "integrity": "sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=", + "deprecated": "This version of 'buffer' is out-of-date. 
You must update to v4.9.2 or newer", + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "node_modules/aws-sdk/node_modules/ieee754": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.8.tgz", + "integrity": "sha1-vjPUCsEO8ZJnAfbwii2G+/0a0+Q=" + }, + "node_modules/backo2": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/backo2/-/backo2-1.0.2.tgz", + "integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc=" + }, + "node_modules/balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + }, + "node_modules/base": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", + "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "dependencies": { + "cache-base": "^1.0.1", + "class-utils": "^0.3.5", + "component-emitter": "^1.2.1", + "define-property": "^1.0.0", + "isobject": "^3.0.1", + "mixin-deep": "^1.2.0", + "pascalcase": "^0.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dependencies": { + "is-descriptor": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/is-data-descriptor": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/base64-arraybuffer": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz", + "integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg=", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/base64-js": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", + "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" + }, + "node_modules/base64id": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz", + "integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY=", + "engines": { + "node": ">= 0.4.0" + } + }, + 
"node_modules/bcryptjs": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/bcryptjs/-/bcryptjs-2.4.3.tgz", + "integrity": "sha1-mrVie5PmBiH/fNrF2pczAn3x0Ms=" + }, + "node_modules/better-assert": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz", + "integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=", + "dependencies": { + "callsite": "1.0.0" + }, + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", + "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "optional": true, + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bl": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", + "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", + "dependencies": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/blob": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/blob/-/blob-0.0.5.tgz", + "integrity": "sha512-gaqbzQPqOoamawKg0LGVd7SzLgXS+JH61oWprSLH+P+abTczqJbhTR8CmJ2u9/bUYNmHTGJx/UEmn6doAvvuig==" + }, + "node_modules/bluebird": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" + }, + "node_modules/bn.js": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.2.tgz", + "integrity": 
"sha512-40rZaf3bUNKTVYu9sIeeEGOg7g14Yvnj9kH7b50EiwX0Q7A6umbvfI5tvHaOERH0XigqKkfLkFQxzb4e6CIXnA==" + }, + "node_modules/body-parser": { + "version": "1.18.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz", + "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=", + "dependencies": { + "bytes": "3.0.0", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "~1.1.2", + "http-errors": "~1.6.3", + "iconv-lite": "0.4.23", + "on-finished": "~2.3.0", + "qs": "6.5.2", + "raw-body": "2.3.3", + "type-is": "~1.6.16" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/bops": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/bops/-/bops-0.0.7.tgz", + "integrity": "sha1-tKClqDmkBkVK8P4FqLkaenZqVOI=", + "dependencies": { + "base64-js": "0.0.2", + "to-utf8": "0.0.1" + } + }, + "node_modules/bops/node_modules/base64-js": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-0.0.2.tgz", + "integrity": "sha1-Ak8Pcq+iW3X5wO5zzU9V7Bvtl4Q=", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/bower": { + "version": "1.8.8", + "resolved": "https://registry.npmjs.org/bower/-/bower-1.8.8.tgz", + "integrity": "sha512-1SrJnXnkP9soITHptSO+ahx3QKp3cVzn8poI6ujqc5SeOkg5iqM1pK9H+DSc2OQ8SnO0jC/NG4Ur/UIwy7574A==", + "deprecated": "We don't recommend using Bower for new projects. Please consider Yarn and Webpack or Parcel. 
You can read how to migrate legacy project here: https://bower.io/blog/2017/how-to-migrate-away-from-bower/", + "bin": { + "bower": "bin/bower" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.8.tgz", + "integrity": "sha1-wHshHHyVLsH479Uad+8NHTmQopI=", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", + "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", + "dependencies": { + "expand-range": "^1.8.1", + "preserve": "^0.2.0", + "repeat-element": "^1.1.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" + }, + "node_modules/browser-pack": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/browser-pack/-/browser-pack-6.1.0.tgz", + "integrity": "sha512-erYug8XoqzU3IfcU8fUgyHqyOXqIE4tUTTQ+7mqUjQlvnXkOO6OlT9c/ZoJVHYoAaqGxr09CN53G7XIsO4KtWA==", + "dependencies": { + "combine-source-map": "~0.8.0", + "defined": "^1.0.0", + "JSONStream": "^1.0.3", + "safe-buffer": "^5.1.1", + "through2": "^2.0.0", + "umd": "^3.0.0" + }, + "bin": { + "browser-pack": "bin/cmd.js" + } + }, + "node_modules/browser-resolve": { + "version": "1.11.3", + "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-1.11.3.tgz", + "integrity": "sha512-exDi1BYWB/6raKHmDTCicQfTkqwN5fioMFV4j8BsfMU4R2DK/QfZfK7kOVkmWCNANf0snkBzqGqAJBao9gZMdQ==", + "dependencies": { + "resolve": "1.1.7" + } + }, + "node_modules/browser-resolve/node_modules/resolve": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", + "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=" + }, + 
"node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "node_modules/browserify": { + "version": "16.2.3", + "resolved": "https://registry.npmjs.org/browserify/-/browserify-16.2.3.tgz", + "integrity": "sha512-zQt/Gd1+W+IY+h/xX2NYMW4orQWhqSwyV+xsblycTtpOuB27h1fZhhNQuipJ4t79ohw4P4mMem0jp/ZkISQtjQ==", + "dependencies": { + "assert": "^1.4.0", + "browser-pack": "^6.0.1", + "browser-resolve": "^1.11.0", + "browserify-zlib": "~0.2.0", + "buffer": "^5.0.2", + "cached-path-relative": "^1.0.0", + "concat-stream": "^1.6.0", + "console-browserify": "^1.1.0", + "constants-browserify": "~1.0.0", + "crypto-browserify": "^3.0.0", + "defined": "^1.0.0", + "deps-sort": "^2.0.0", + "domain-browser": "^1.2.0", + "duplexer2": "~0.1.2", + "events": "^2.0.0", + "glob": "^7.1.0", + "has": "^1.0.0", + "htmlescape": "^1.1.0", + "https-browserify": "^1.0.0", + "inherits": "~2.0.1", + "insert-module-globals": "^7.0.0", + "JSONStream": "^1.0.3", + "labeled-stream-splicer": "^2.0.0", + "mkdirp": "^0.5.0", + "module-deps": "^6.0.0", + "os-browserify": "~0.3.0", + "parents": "^1.0.1", + "path-browserify": "~0.0.0", + "process": "~0.11.0", + "punycode": "^1.3.2", + "querystring-es3": "~0.2.0", + "read-only-stream": "^2.0.0", + "readable-stream": "^2.0.2", + "resolve": "^1.1.4", + "shasum": "^1.0.0", + "shell-quote": "^1.6.1", + "stream-browserify": "^2.0.0", + "stream-http": "^2.0.0", + "string_decoder": "^1.1.1", + "subarg": "^1.0.0", + "syntax-error": "^1.1.1", + "through2": "^2.0.0", + "timers-browserify": "^1.0.1", + "tty-browserify": "0.0.1", + "url": "~0.11.0", + "util": "~0.10.1", + "vm-browserify": "^1.0.0", + "xtend": "^4.0.0" + }, + "bin": { + "browserify": "bin/cmd.js" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/browserify-aes": { + "version": 
"1.2.0", + "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", + "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "dependencies": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/browserify-cipher": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", + "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "dependencies": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "node_modules/browserify-des": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", + "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "dependencies": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/browserify-rsa": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz", + "integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=", + "dependencies": { + "bn.js": "^4.1.0", + "randombytes": "^2.0.1" + } + }, + "node_modules/browserify-rsa/node_modules/bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/browserify-sign": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", + "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", + "dependencies": { + "bn.js": 
"^5.1.1", + "browserify-rsa": "^4.0.1", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.5.3", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.5", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" + } + }, + "node_modules/browserify-sign/node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/browserify-sign/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/browserify-sign/node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/browserify-zlib": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "dependencies": { + "pako": "~1.0.5" + } + }, + "node_modules/browserify/node_modules/events": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/events/-/events-2.1.0.tgz", + "integrity": 
"sha512-3Zmiobend8P9DjmKAty0Era4jV8oJ0yGYe2nJJAxgymF9+N8F2m0hhZiMoWtcfepExzNKZumFU3ksdQbInGWCg==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/browserify/node_modules/url": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", + "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", + "dependencies": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "node_modules/bson": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/bson/-/bson-1.0.9.tgz", + "integrity": "sha512-IQX9/h7WdMBIW/q/++tGd+emQr0XMdeZ6icnT/74Xk9fnabWn+gZgpE+9V+gujL3hhJOoNrnDVY7tWdzc7NUTg==", + "deprecated": "Fixed a critical issue with BSON serialization documented in CVE-2019-2391, see https://bit.ly/2KcpXdo for more details", + "engines": { + "node": ">=0.6.19" + } + }, + "node_modules/buffer": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.6.0.tgz", + "integrity": "sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==", + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4" + } + }, + "node_modules/buffer-alloc": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "dependencies": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "node_modules/buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", + "engines": { + "node": "*" + } + }, + 
"node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" + }, + "node_modules/buffer-fill": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=" + }, + "node_modules/buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + }, + "node_modules/buffer-shims": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-shims/-/buffer-shims-1.0.0.tgz", + "integrity": "sha1-mXjOMXOIxkmth5MCjDR37wRKi1E=" + }, + "node_modules/buffer-xor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", + "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" + }, + "node_modules/builtin-status-codes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", + "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cache-base": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", + "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "dependencies": { + "collection-visit": "^1.0.0", + "component-emitter": "^1.2.1", + "get-value": "^2.0.6", + "has-value": "^1.0.0", + "isobject": "^3.0.1", + "set-value": "^2.0.0", + "to-object-path": "^0.3.0", + "union-value": "^1.0.0", + "unset-value": "^1.0.0" + }, 
+ "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cache-base/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cached-path-relative": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/cached-path-relative/-/cached-path-relative-1.0.2.tgz", + "integrity": "sha512-5r2GqsoEb4qMTTN9J+WzXfjov+hjxT+j3u5K+kIVNIwAd99DLCJE9pBIMP1qVeybV6JiijL385Oz0DcYxfbOIg==" + }, + "node_modules/call-me-maybe": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", + "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=" + }, + "node_modules/caller-path": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-0.1.0.tgz", + "integrity": "sha1-lAhe9jWB7NPaqSREqP6U6CV3dR8=", + "dependencies": { + "callsites": "^0.2.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/callsite": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz", + "integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA=", + "engines": { + "node": "*" + } + }, + "node_modules/callsites": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-0.2.0.tgz", + "integrity": "sha1-r6uWJikQp/M8GaV3WCXGnzTjUMo=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/camelcase": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", + "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/catharsis": { + "version": "0.8.11", + "resolved": "https://registry.npmjs.org/catharsis/-/catharsis-0.8.11.tgz", + "integrity": "sha512-a+xUyMV7hD1BrDQA/3iPV7oc+6W26BgVJO05PGEoatMyIuPScQKsde6i3YorWX1qs+AZjnJ18NqdKoCtKiNh1g==", + "dependencies": { + "lodash": "^4.17.14" + 
}, + "engines": { + "node": ">= 8" + } + }, + "node_modules/chai": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/chai/-/chai-3.5.0.tgz", + "integrity": "sha1-TQJjewZ/6Vi9v906QOxW/vc3Mkc=", + "dev": true, + "dependencies": { + "assertion-error": "^1.0.1", + "deep-eql": "^0.1.3", + "type-detect": "^1.0.0" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chance": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/chance/-/chance-1.0.16.tgz", + "integrity": "sha512-2bgDHH5bVfAXH05SPtjqrsASzZ7h90yCuYT2z4mkYpxxYvJXiIydBFzVieVHZx7wLH1Ag2Azaaej2/zA1XUrNQ==" + }, + "node_modules/chokidar": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz", + "integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=", + "deprecated": "Chokidar 2 will break on node v14+. 
Upgrade to chokidar 3 with 15x less dependencies.", + "dependencies": { + "anymatch": "^1.3.0", + "async-each": "^1.0.0", + "glob-parent": "^2.0.0", + "inherits": "^2.0.1", + "is-binary-path": "^1.0.0", + "is-glob": "^2.0.0", + "path-is-absolute": "^1.0.0", + "readdirp": "^2.0.0" + }, + "optionalDependencies": { + "fsevents": "^1.0.0" + } + }, + "node_modules/cipher-base": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", + "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/class-utils": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", + "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "dependencies": { + "arr-union": "^3.1.0", + "define-property": "^0.2.5", + "isobject": "^3.0.0", + "static-extend": "^0.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/class-utils/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/class-utils/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cli-color": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/cli-color/-/cli-color-1.1.0.tgz", + "integrity": "sha1-3hiM3Ekp2DtnrqBBEPvtQP2/Z3U=", + "dependencies": { + "ansi-regex": "2", + "d": "^0.1.1", + "es5-ext": "^0.10.8", + "es6-iterator": "2", + "memoizee": "^0.3.9", + "timers-ext": "0.1" + 
} + }, + "node_modules/cliui": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", + "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", + "dependencies": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wrap-ansi": "^2.0.0" + } + }, + "node_modules/code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/collection-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", + "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", + "dependencies": { + "map-visit": "^1.0.0", + "object-visit": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/color": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/color/-/color-0.8.0.tgz", + "integrity": "sha1-iQwHw/1OZJU3Y4kRz2keVFi2/KU=", + "dependencies": { + "color-convert": "^0.5.0", + "color-string": "^0.3.0" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" + }, + "node_modules/color-string": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-0.3.0.tgz", + "integrity": "sha1-J9RvtnAlxcL6JZk7+/V55HhBuZE=", + "dependencies": { + "color-name": "^1.0.0" + } + }, + "node_modules/color/node_modules/color-convert": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-0.5.3.tgz", + 
"integrity": "sha1-vbbGnOZg+t/+CwAHzER+G59ygr0=" + }, + "node_modules/colornames": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/colornames/-/colornames-0.0.2.tgz", + "integrity": "sha1-2BH9bIT1kClJmorEQ2ICk1uSvjE=" + }, + "node_modules/colors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", + "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/colorspace": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.0.1.tgz", + "integrity": "sha1-yZx5btMRKLmHalLh7l7gOkpxl0k=", + "dependencies": { + "color": "0.8.x", + "text-hex": "0.0.x" + } + }, + "node_modules/combine-source-map": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/combine-source-map/-/combine-source-map-0.8.0.tgz", + "integrity": "sha1-pY0N8ELBhvz4IqjoAV9UUNLXmos=", + "dependencies": { + "convert-source-map": "~1.1.0", + "inline-source-map": "~0.6.0", + "lodash.memoize": "~3.0.3", + "source-map": "~0.5.3" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "2.15.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", + "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==" + }, + "node_modules/component-bind": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz", + "integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E=" + }, + "node_modules/component-emitter": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", + "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" + }, + "node_modules/component-inherit": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/component-inherit/-/component-inherit-0.0.3.tgz", + "integrity": "sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM=" + }, + "node_modules/compress-commons": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/compress-commons/-/compress-commons-1.2.2.tgz", + "integrity": "sha1-UkqfEJA/OoEzibAiXSfEi7dRiQ8=", + "dependencies": { + "buffer-crc32": "^0.2.1", + "crc32-stream": "^2.0.0", + "normalize-path": "^2.0.0", + "readable-stream": "^2.0.0" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.2.tgz", + "integrity": "sha1-qv+81qr4VLROuygDU9WtFlH1mmk=", + "dependencies": { + "accepts": "~1.3.4", + "bytes": "3.0.0", + "compressible": "~2.0.13", + "debug": "2.6.9", + "on-headers": "~1.0.1", + "safe-buffer": "5.1.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + 
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", + "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + }, + "node_modules/concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "engines": [ + "node >= 0.8" + ], + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/connect-multiparty": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/connect-multiparty/-/connect-multiparty-2.1.0.tgz", + "integrity": "sha512-DLzhq7mcQKKk/Y83NLY5dp0kxO0xTxA5yu3oMgFBfpWLQR1NArrXMBcEXignTcNFVaXrjkgfepNG3nkfEy9Sow==", + "dependencies": { + "multiparty": "~4.1.3", + "on-finished": "~2.3.0", + "qs": "~6.5.1", + "type-is": "~1.6.15" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/console-browserify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" + }, + "node_modules/constants-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", + "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": 
"https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", + "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.1.3.tgz", + "integrity": "sha1-SCnId+n+SbMWHzvzZziI4gRpmGA=" + }, + "node_modules/cookie": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", + "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-parser": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cookie-parser/-/cookie-parser-1.4.3.tgz", + "integrity": "sha1-D+MfoZ0AC5X0qt8fU/3CuKIDuqU=", + "dependencies": { + "cookie": "0.3.1", + "cookie-signature": "1.0.6" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" + }, + "node_modules/cookiejar": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.2.tgz", + "integrity": "sha512-Mw+adcfzPxcPeI+0WlvRrr/3lGVO0bD75SxX6811cxSh1Wbxx7xZBGK1eVtDf6si8rg2lhnUjsVLMFMfbRIuwA==" + }, + "node_modules/copy-descriptor": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", + "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/copy-to": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz", + "integrity": "sha1-JoD7uAaKSNCGVrYJgJK9r8kG9KU=" + }, + "node_modules/core-js": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", + "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==", + "deprecated": "core-js@<3.3 is no longer maintained and not recommended for usage due to the number of issues. Because of the V8 engine whims, feature detection in old core-js versions could cause a slowdown up to 100x even if nothing is polyfilled. Please, upgrade your dependencies to the actual version of core-js.", + "hasInstallScript": true + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "node_modules/crc": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/crc/-/crc-3.8.0.tgz", + "integrity": "sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==", + "dependencies": { + "buffer": "^5.1.0" + } + }, + "node_modules/crc32-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/crc32-stream/-/crc32-stream-2.0.0.tgz", + "integrity": "sha1-483TtN8xaN10494/u8t7KX/pCPQ=", + "dependencies": { + "crc": "^3.4.4", + "readable-stream": "^2.0.0" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/create-ecdh": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", + "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", + "dependencies": { + "bn.js": "^4.1.0", + "elliptic": "^6.5.3" + } + }, + "node_modules/create-ecdh/node_modules/bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": 
"sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/create-hash": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", + "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "dependencies": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "node_modules/create-hmac": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", + "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "dependencies": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "node_modules/crypto-browserify": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", + "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "dependencies": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + }, + "engines": { + "node": "*" + } + }, + "node_modules/cycle": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", + "integrity": "sha1-IegLK+hYD5i0aPN5QwZisEbDStI=", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/d": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/d/-/d-0.1.1.tgz", + "integrity": "sha1-2hhMU10Y2O57oqoim5FACfrhEwk=", + "dependencies": { + "es5-ext": "~0.10.2" + } + }, + 
"node_modules/dash-ast": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dash-ast/-/dash-ast-1.0.0.tgz", + "integrity": "sha512-Vy4dx7gquTeMcQR/hDkYLGUnwVil6vk4FOOct+djUnHOUWt+zJPJAaRIXaAFkPXtJjvlY7o3rfRu0/3hpnwoUA==" + }, + "node_modules/debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decode-uri-component": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", + "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/deep-eql": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-0.1.3.tgz", + "integrity": "sha1-71WKyrjeJSBs1xOQbXTlaTDrafI=", + "dev": true, + "dependencies": { + "type-detect": "0.1.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/deep-eql/node_modules/type-detect": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-0.1.1.tgz", + "integrity": "sha1-C6XsKohWQORw6k6FBZcZANrFiCI=", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/default-user-agent": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/default-user-agent/-/default-user-agent-1.0.0.tgz", + "integrity": "sha1-FsRu/cq6PtxF8k8r1IaLAbfCrcY=", + "dependencies": { + "os-name": "~1.0.3" + }, + "engines": { + 
"node": ">= 0.10.0" + } + }, + "node_modules/define-property": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", + "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "dependencies": { + "is-descriptor": "^1.0.2", + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-property/node_modules/is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-property/node_modules/is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-property/node_modules/is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-property/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-property/node_modules/kind-of": { + "version": "6.0.3", + "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/defined": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/defined/-/defined-1.0.0.tgz", + "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM=" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/deps-sort": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/deps-sort/-/deps-sort-2.0.1.tgz", + "integrity": "sha512-1orqXQr5po+3KI6kQb9A4jnXT1PBwggGl2d7Sq2xsnOeI9GPcE/tGcF9UiSZtZBM7MukY4cAh7MemS6tZYipfw==", + "dependencies": { + "JSONStream": "^1.0.3", + "shasum-object": "^1.0.0", + "subarg": "^1.0.0", + "through2": "^2.0.0" + }, + "bin": { + "deps-sort": "bin/cmd.js" + } + }, + "node_modules/des.js": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", + "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", + "dependencies": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/destroy": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", + "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" + }, + "node_modules/detective": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/detective/-/detective-5.2.0.tgz", + "integrity": "sha512-6SsIx+nUUbuK0EthKjv0zrdnajCCXVYGmbYYiYjFVpzcjwEs/JMDZ8tPRG29J/HhN56t3GJp2cGSWDRjjot8Pg==", + 
"dependencies": { + "acorn-node": "^1.6.1", + "defined": "^1.0.0", + "minimist": "^1.1.1" + }, + "bin": { + "detective": "bin/detective.js" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/detective/node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + }, + "node_modules/diagnostics": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/diagnostics/-/diagnostics-1.0.1.tgz", + "integrity": "sha1-rM2wgMgrsl0N1zQwqeaof7tDFUE=", + "dependencies": { + "colorspace": "1.0.x", + "enabled": "1.0.x", + "kuler": "0.0.x" + } + }, + "node_modules/diff": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", + "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diffie-hellman": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", + "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "dependencies": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + } + }, + "node_modules/diffie-hellman/node_modules/bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/digest-header": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/digest-header/-/digest-header-0.0.1.tgz", + "integrity": "sha1-Ecz23uxXZqw3l0TZAcEsuklRS+Y=", + "dependencies": { + "utility": "0.1.11" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/digest-header/node_modules/utility": { + "version": 
"0.1.11", + "resolved": "https://registry.npmjs.org/utility/-/utility-0.1.11.tgz", + "integrity": "sha1-/eYM+bTkdRlHoM9dEEzik2ciZxU=", + "dependencies": { + "address": ">=0.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/dom-serializer": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", + "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "dependencies": { + "domelementtype": "^2.0.1", + "entities": "^2.0.0" + } + }, + "node_modules/domain-browser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", + "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", + "engines": { + "node": ">=0.4", + "npm": ">=1.2" + } + }, + "node_modules/domelementtype": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.0.1.tgz", + "integrity": "sha512-5HOHUDsYZWV8FGWN0Njbr/Rn7f/eWSQi1v7+HsUVwXgn8nWWlL64zKDkS0n8ZmQ3mlWOMuXOnR+7Nx/5tMO5AQ==" + }, + "node_modules/domhandler": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-3.0.0.tgz", + "integrity": "sha512-eKLdI5v9m67kbXQbJSNn1zjh0SDzvzWVWtX+qEI3eMjZw8daH9k8rlj1FZY9memPwjiskQFbe7vHVVJIAqoEhw==", + "dependencies": { + "domelementtype": "^2.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/domutils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.1.0.tgz", + "integrity": "sha512-CD9M0Dm1iaHfQ1R/TI+z3/JWp/pgub0j4jIQKH89ARR4ATAV2nbaOQS5XxU9maJP5jHaPdDDQSEHuE2UmpUTKg==", + "dependencies": { + "dom-serializer": "^0.2.1", + "domelementtype": "^2.0.1", + "domhandler": "^3.0.0" + } + }, + "node_modules/double-ended-queue": { + "version": "2.1.0-0", + "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz", + 
"integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=" + }, + "node_modules/duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "dependencies": { + "readable-stream": "^2.0.2" + } + }, + "node_modules/duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dependencies": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" + }, + "node_modules/ejs": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/ejs/-/ejs-2.6.1.tgz", + "integrity": "sha512-0xy4A/twfrRCnkhfk8ErDi5DqdAsAqeGxht4xkCUrsvhhbQNs7E+4jV0CN7+NKIY0aHE72+XvqtBIXzD31ZbXQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/elliptic": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", + "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", + "dependencies": { + "bn.js": "^4.4.0", + "brorand": "^1.0.1", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.0" + } + }, + "node_modules/elliptic/node_modules/bn.js": { + "version": "4.11.9", + "resolved": 
"https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/emits": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emits/-/emits-3.0.0.tgz", + "integrity": "sha1-MnUrupXhcHshlWI4Srm7ix/WL3A=" + }, + "node_modules/enabled": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-1.0.2.tgz", + "integrity": "sha1-ll9lE9LC0cX0ZStkouM5ZGf8L5M=", + "dependencies": { + "env-variable": "0.0.x" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/engine.io": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.2.1.tgz", + "integrity": "sha512-+VlKzHzMhaU+GsCIg4AoXF1UdDFjHHwMmMKqMJNDNLlUlejz58FCy4LBqB2YVJskHGYl06BatYWKP2TVdVXE5w==", + "dependencies": { + "accepts": "~1.3.4", + "base64id": "1.0.0", + "cookie": "0.3.1", + "debug": "~3.1.0", + "engine.io-parser": "~2.1.0", + "ws": "~3.3.1" + } + }, + "node_modules/engine.io-client": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.2.1.tgz", + "integrity": "sha512-y5AbkytWeM4jQr7m/koQLc5AxpRKC1hEVUb/s1FUAWEJq5AzJJ4NLvzuKPuxtDi5Mq755WuDvZ6Iv2rXj4PTzw==", + "dependencies": { + "component-emitter": "1.2.1", + "component-inherit": "0.0.3", + "debug": "~3.1.0", + "engine.io-parser": "~2.1.1", + "has-cors": "1.1.0", + "indexof": "0.0.1", + "parseqs": "0.0.5", + "parseuri": "0.0.5", + 
"ws": "~3.3.1", + "xmlhttprequest-ssl": "~1.5.4", + "yeast": "0.1.2" + } + }, + "node_modules/engine.io-client/node_modules/component-emitter": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" + }, + "node_modules/engine.io-parser": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.3.tgz", + "integrity": "sha512-6HXPre2O4Houl7c4g7Ic/XzPnHBvaEmN90vtRO9uLmwtRqQmTOw0QMevL1TOfL2Cpu1VzsaTmMotQgMdkzGkVA==", + "dependencies": { + "after": "0.8.2", + "arraybuffer.slice": "~0.0.7", + "base64-arraybuffer": "0.1.5", + "blob": "0.0.5", + "has-binary2": "~1.0.2" + } + }, + "node_modules/entities": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", + "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" + }, + "node_modules/env-variable": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/env-variable/-/env-variable-0.0.6.tgz", + "integrity": "sha512-bHz59NlBbtS0NhftmR8+ExBEekE7br0e01jw+kk0NDro7TtZzBYZ5ScGPs3OmwnpyfHTHOtr1Y6uedCdrIldtg==" + }, + "node_modules/es5-ext": { + "version": "0.10.53", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", + "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", + "dependencies": { + "es6-iterator": "~2.0.3", + "es6-symbol": "~3.1.3", + "next-tick": "~1.0.0" + } + }, + "node_modules/es6-iterator": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", + "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", + "dependencies": { + "d": "1", + "es5-ext": "^0.10.35", + "es6-symbol": "^3.1.1" + } + }, + "node_modules/es6-iterator/node_modules/d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", + 
"integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "dependencies": { + "es5-ext": "^0.10.50", + "type": "^1.0.1" + } + }, + "node_modules/es6-promise": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.2.1.tgz", + "integrity": "sha1-7FYjOGgDKQkgcXDDlEjiREndH8Q=" + }, + "node_modules/es6-symbol": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", + "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", + "dependencies": { + "d": "^1.0.1", + "ext": "^1.1.2" + } + }, + "node_modules/es6-symbol/node_modules/d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", + "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "dependencies": { + "es5-ext": "^0.10.50", + "type": "^1.0.1" + } + }, + "node_modules/es6-weak-map": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-0.1.4.tgz", + "integrity": "sha1-cGzvnpmqI2undmwjnIueKG6n0ig=", + "dependencies": { + "d": "~0.1.1", + "es5-ext": "~0.10.6", + "es6-iterator": "~0.1.3", + "es6-symbol": "~2.0.1" + } + }, + "node_modules/es6-weak-map/node_modules/es6-iterator": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-0.1.3.tgz", + "integrity": "sha1-1vWLjE/EE8JJtLqhl2j45NfIlE4=", + "dependencies": { + "d": "~0.1.1", + "es5-ext": "~0.10.5", + "es6-symbol": "~2.0.1" + } + }, + "node_modules/es6-weak-map/node_modules/es6-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-2.0.1.tgz", + "integrity": "sha1-dhtcZ8/U8dGK+yNPaR1nhoLLO/M=", + "dependencies": { + "d": "~0.1.1", + "es5-ext": "~0.10.5" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/event-emitter": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", + "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", + "dependencies": { + "d": "1", + "es5-ext": "~0.10.14" + } + }, + "node_modules/event-emitter/node_modules/d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", + "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "dependencies": { + "es5-ext": "^0.10.50", + "type": "^1.0.1" + } + }, + "node_modules/events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/evp_bytestokey": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", + "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + 
"dependencies": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/expand-brackets": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", + "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", + "dependencies": { + "is-posix-bracket": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/expand-range": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", + "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", + "dependencies": { + "fill-range": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/express": { + "version": "4.16.3", + "resolved": "https://registry.npmjs.org/express/-/express-4.16.3.tgz", + "integrity": "sha1-avilAjUNsyRuzEvs9rWjTSL37VM=", + "dependencies": { + "accepts": "~1.3.5", + "array-flatten": "1.1.1", + "body-parser": "1.18.2", + "content-disposition": "0.5.2", + "content-type": "~1.0.4", + "cookie": "0.3.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "~1.1.2", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.1.1", + "fresh": "0.5.2", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "~2.3.0", + "parseurl": "~1.3.2", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.3", + "qs": "6.5.1", + "range-parser": "~1.2.0", + "safe-buffer": "5.1.1", + "send": "0.16.2", + "serve-static": "1.13.2", + "setprototypeof": "1.1.0", + "statuses": "~1.4.0", + "type-is": "~1.6.16", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/body-parser": { + "version": "1.18.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.2.tgz", + "integrity": "sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ=", + "dependencies": { + "bytes": "3.0.0", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "~1.1.1", + "http-errors": 
"~1.6.2", + "iconv-lite": "0.4.19", + "on-finished": "~2.3.0", + "qs": "6.5.1", + "raw-body": "2.3.2", + "type-is": "~1.6.15" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/iconv-lite": { + "version": "0.4.19", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz", + "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/express/node_modules/qs": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", + "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/express/node_modules/raw-body": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz", + "integrity": "sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k=", + "dependencies": { + "bytes": "3.0.0", + "http-errors": "1.6.2", + "iconv-lite": "0.4.19", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/express/node_modules/raw-body/node_modules/depd": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.1.tgz", + "integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/raw-body/node_modules/http-errors": { + "version": "1.6.2", + "resolved": 
"https://registry.npmjs.org/http-errors/-/http-errors-1.6.2.tgz", + "integrity": "sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY=", + "dependencies": { + "depd": "1.1.1", + "inherits": "2.0.3", + "setprototypeof": "1.0.3", + "statuses": ">= 1.3.1 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/raw-body/node_modules/setprototypeof": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.0.3.tgz", + "integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ=" + }, + "node_modules/express/node_modules/safe-buffer": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", + "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" + }, + "node_modules/express/node_modules/statuses": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ext": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", + "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", + "dependencies": { + "type": "^2.0.0" + } + }, + "node_modules/ext/node_modules/type": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz", + "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==" + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "3.0.2", + "resolved": 
"https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", + "dependencies": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extend-shallow/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extglob": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", + "integrity": "sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", + "dependencies": { + "is-extglob": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eyes": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", + "integrity": "sha1-Ys8SAjTGg3hdkCNIqADvPgzCC8A=", + "engines": { + "node": "> 0.1.90" + } + }, + "node_modules/fast-safe-stringify": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", + "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" + }, + "node_modules/fd-slicer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.0.1.tgz", + "integrity": "sha1-i1vL2ewyfFBBv5qwI/1nUPEXfmU=", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "optional": true + }, + "node_modules/filename-regex": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", + "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fill-range": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", + "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", + "dependencies": { + "is-number": "^2.1.0", + "isobject": "^2.0.0", + "randomatic": "^3.0.0", + "repeat-element": "^1.1.2", + "repeat-string": "^1.5.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/finalhandler": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", + "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "~2.3.0", + "parseurl": "~1.3.2", + "statuses": "~1.4.0", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/finalhandler/node_modules/statuses": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/for-in": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/for-own": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", + "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", + "dependencies": { + "for-in": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/form-data": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", + "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/format-util": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/format-util/-/format-util-1.0.5.tgz", + "integrity": "sha512-varLbTj0e0yVyRpqQhuWV+8hlePAgaoFRhNFj50BNjEIrw1/DphHSObtqwskVCPWNgzwPoQrZAbfa/SBiicNeg==" + }, + "node_modules/formidable": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.2.2.tgz", + "integrity": "sha512-V8gLm+41I/8kguQ4/o1D3RIHRmhYFG4pnNyonvua+40rqcEmT4+V71yaZ3B457xbbgCsCfjSPi65u/W6vK1U5Q==", + "funding": { + "url": "https://ko-fi.com/tunnckoCore/commissions" + } + }, + "node_modules/forwarded": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", + "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fragment-cache": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", + "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", + "dependencies": { + "map-cache": "^0.2.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": 
"https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "node_modules/fsevents": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", + "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", + "deprecated": "fsevents 1 will break on node v14+ and could be using insecure binaries. Upgrade to fsevents 2.", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "dependencies": { + "bindings": "^1.5.0", + "nan": "^2.12.1" + }, + "engines": { + "node": ">= 4.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "node_modules/get-assigned-identifiers": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/get-assigned-identifiers/-/get-assigned-identifiers-1.2.0.tgz", + "integrity": "sha512-mBBwmeGTrxEMO4pMaaf/uUEFHnYtwr8FTe8Y/mer4rcV/bye0qGm6pw1bGZFGStxC5O76c5ZAVBGnqHmOaJpdQ==" + }, + "node_modules/get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/glob": { + "version": "7.1.2", + "resolved": 
"https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + } + }, + "node_modules/glob-base": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", + "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", + "dependencies": { + "glob-parent": "^2.0.0", + "is-glob": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/glob-parent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", + "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", + "dependencies": { + "is-glob": "^2.0.0" + } + }, + "node_modules/got": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/got/-/got-2.4.0.tgz", + "integrity": "sha1-5Ah6LNWbXSDy0WnchdIWntnon1Y=", + "dependencies": { + "duplexify": "^3.2.0", + "infinity-agent": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "object-assign": "^2.0.0", + "prepend-http": "^1.0.0", + "read-all-stream": "^1.0.0", + "statuses": "^1.2.1", + "timed-out": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/got/node_modules/object-assign": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-2.1.1.tgz", + "integrity": "sha1-Q8NuXVaf+OSBbE76i+AtJpZ8GKo=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", + "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" + }, + "node_modules/graceful-readlink": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/graceful-readlink/-/graceful-readlink-1.0.1.tgz", + "integrity": "sha1-TK+tdrxi8C+gObL5Tpo906ORpyU=" + }, + "node_modules/growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true, + "engines": { + "node": ">=4.x" + } + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-binary2": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.3.tgz", + "integrity": "sha512-G1LWKhDSvhGeAQ8mPVQlqNcOB2sJdwATtZKl2pDKKHfpf/rYj24lkinxf69blJbnsvtqqNU+L3SL50vzZhXOnw==", + "dependencies": { + "isarray": "2.0.1" + } + }, + "node_modules/has-binary2/node_modules/isarray": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz", + "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=" + }, + "node_modules/has-cors": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-cors/-/has-cors-1.1.0.tgz", + "integrity": "sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk=" + }, + "node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "engines": { + "node": ">=4" + } + }, + "node_modules/has-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", + "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", + "dependencies": { + "get-value": "^2.0.6", + "has-values": "^1.0.0", + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/has-value/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", + "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", + "dependencies": { + "is-number": "^3.0.0", + "kind-of": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values/node_modules/is-number/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-values/node_modules/kind-of": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/hash-base": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", + "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", + "dependencies": { + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/hash-base/node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/hash-base/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/hash-base/node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dependencies": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "node_modules/he": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz", + "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=", + "dev": true, + "bin": { + "he": "bin/he" + } + }, + "node_modules/hmac-drbg": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", + "dependencies": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "node_modules/htmlescape": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/htmlescape/-/htmlescape-1.1.1.tgz", + "integrity": "sha1-OgPtwiFLyjtmQko+eVk0lQnLA1E=", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/htmlparser2": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-4.1.0.tgz", + "integrity": "sha512-4zDq1a1zhE4gQso/c5LP1OtrhYTncXNSpvJYtWJBtXAETPlMfi3IFNjGuQbYLuVY4ZR0QMqRVvo4Pdy9KLyP8Q==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^3.0.0", + "domutils": "^2.0.0", + "entities": "^2.0.0" + } + }, + "node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/https-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", + "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=" + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.23", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz", + "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + }, + "node_modules/indexof": { + "version": "0.0.1", + "resolved": 
"https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz", + "integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10=" + }, + "node_modules/infinity-agent": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/infinity-agent/-/infinity-agent-1.0.2.tgz", + "integrity": "sha1-Lp2iwHC5hkqLxmwBlOF5HtgFgCU=" + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" + }, + "node_modules/ink-docstrap": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/ink-docstrap/-/ink-docstrap-1.3.2.tgz", + "integrity": "sha512-STx5orGQU1gfrkoI/fMU7lX6CSP7LBGO10gXNgOZhwKhUqbtNjCkYSewJtNnLmWP1tAGN6oyEpG1HFPw5vpa5Q==", + "dependencies": { + "moment": "^2.14.1", + "sanitize-html": "^1.13.0" + } + }, + "node_modules/inline-source-map": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/inline-source-map/-/inline-source-map-0.6.2.tgz", + "integrity": "sha1-+Tk0ccGKedFyT4Y/o4tYY3Ct4qU=", + "dependencies": { + "source-map": "~0.5.3" + } + }, + "node_modules/insert-module-globals": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/insert-module-globals/-/insert-module-globals-7.2.0.tgz", + "integrity": "sha512-VE6NlW+WGn2/AeOMd496AHFYmE7eLKkUY6Ty31k4og5vmA3Fjuwe9v6ifH6Xx/Hz27QvdoMoviw1/pqWRB09Sw==", + "dependencies": { + "acorn-node": "^1.5.2", + "combine-source-map": "^0.8.0", + "concat-stream": "^1.6.1", + "is-buffer": "^1.1.0", + "JSONStream": "^1.0.3", + "path-is-absolute": "^1.0.1", + "process": "~0.11.0", + "through2": "^2.0.0", + "undeclared-identifiers": "^1.1.2", + "xtend": "^4.0.0" + }, + "bin": { + "insert-module-globals": "bin/cmd.js" + } + }, + "node_modules/invert-kv": { + 
"version": "1.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", + "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-accessor-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "dependencies": { + "binary-extensions": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + }, + "node_modules/is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "dependencies": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" 
+ }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-descriptor/node_modules/kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-dotfile": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", + "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-equal-shallow": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", + "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", + "dependencies": { + "is-primitive": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", + "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dependencies": { + "number-is-nan": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", + "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", + "dependencies": { + "is-extglob": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "2.1.0", + 
"resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", + "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-object/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-posix-bracket": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", + "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-primitive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", + "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + 
"node_modules/isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dependencies": { + "isarray": "1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" + }, + "node_modules/jju": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/jju/-/jju-1.2.1.tgz", + "integrity": "sha1-7fbsINXWaMgMLADOpj+KlCKktSg=" + }, + "node_modules/jmespath": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", + "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/js-yaml": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/js2xmlparser": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-4.0.1.tgz", + "integrity": "sha512-KrPTolcw6RocpYjdC7pL7v62e55q7qOMHvLX1UCLc5AAS8qeJ6nukarEJAF2KL2PZxlbGueEbINqZR2bDe/gUw==", + "dependencies": { + "xmlcreate": "^2.0.3" + } + }, + "node_modules/jsdoc": { + "version": "3.6.3", + "resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-3.6.3.tgz", + "integrity": "sha512-Yf1ZKA3r9nvtMWHO1kEuMZTlHOF8uoQ0vyo5eH7SQy5YeIiHM+B0DgKnn+X6y6KDYZcF7G2SPkKF+JORCXWE/A==", + "dependencies": { + "@babel/parser": "^7.4.4", + "bluebird": "^3.5.4", + "catharsis": "^0.8.11", + "escape-string-regexp": "^2.0.0", + "js2xmlparser": "^4.0.0", + "klaw": "^3.0.0", + "markdown-it": "^8.4.2", + "markdown-it-anchor": "^5.0.2", + "marked": "^0.7.0", + 
"mkdirp": "^0.5.1", + "requizzle": "^0.2.3", + "strip-json-comments": "^3.0.1", + "taffydb": "2.6.2", + "underscore": "~1.9.1" + }, + "bin": { + "jsdoc": "jsdoc.js" + }, + "engines": { + "node": ">=8.15.0" + } + }, + "node_modules/jsdoc/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/jsdoc/node_modules/marked": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/marked/-/marked-0.7.0.tgz", + "integrity": "sha512-c+yYdCZJQrsRjTPhUx7VKkApw9bwDkNbHUKo1ovgcfDjb2kc8rLuRbIFyXL5WOEUwzSSKo3IXpph2K6DqB/KZg==", + "bin": { + "marked": "bin/marked" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/json-schema-ref-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/json-schema-ref-parser/-/json-schema-ref-parser-3.3.1.tgz", + "integrity": "sha512-stQTMhec2R/p2L9dH4XXRlpNCP0mY8QrLd/9Kl+8SHJQmwHtE1nDfXH4wbsSM+GkJMl8t92yZbI0OIol432CIQ==", + "dependencies": { + "call-me-maybe": "^1.0.1", + "debug": "^3.0.0", + "es6-promise": "^4.1.1", + "js-yaml": "^3.9.1", + "ono": "^4.0.2", + "z-schema": "^3.18.2" + } + }, + "node_modules/json-schema-ref-parser/node_modules/es6-promise": { + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", + "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" + }, + "node_modules/json-stable-stringify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-0.0.1.tgz", + "integrity": "sha1-YRwj6BTbN1Un34URk9tZ3Sryf0U=", + "dependencies": { + "jsonify": "~0.0.0" + } + }, + "node_modules/jsonify": { + "version": "0.0.0", + "resolved": 
"https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", + "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=", + "engines": { + "node": "*" + } + }, + "node_modules/jsonparse": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=", + "engines": [ + "node >= 0.2.0" + ] + }, + "node_modules/JSONStream": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", + "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", + "dependencies": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "JSONStream": "bin.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jsonwebtoken": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.3.0.tgz", + "integrity": "sha512-oge/hvlmeJCH+iIz1DwcO7vKPkNGJHhgkspk8OH3VKlw+mbi42WtD4ig1+VXRln765vxptAv+xT26Fd3cteqag==", + "dependencies": { + "jws": "^3.1.5", + "lodash.includes": "^4.3.0", + "lodash.isboolean": "^3.0.3", + "lodash.isinteger": "^4.0.4", + "lodash.isnumber": "^3.0.3", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.once": "^4.0.0", + "ms": "^2.1.1" + }, + "engines": { + "node": ">=0.12", + "npm": ">=1.4.28" + } + }, + "node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", + "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "dependencies": { + "jwa": "^1.4.1", + 
"safe-buffer": "^5.0.1" + } + }, + "node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/klaw": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/klaw/-/klaw-3.0.0.tgz", + "integrity": "sha512-0Fo5oir+O9jnXu5EefYbVK+mHMBeEVEy2cmctR1O1NECcCkPRreJKrS6Qt/j3KC2C148Dfo9i3pCmCMsdqGr0g==", + "dependencies": { + "graceful-fs": "^4.1.9" + } + }, + "node_modules/kuler": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-0.0.0.tgz", + "integrity": "sha1-tmu0a5NOVQ9Z2BiEjgq7pPf1VTw=", + "dependencies": { + "colornames": "0.0.2" + } + }, + "node_modules/labeled-stream-splicer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/labeled-stream-splicer/-/labeled-stream-splicer-2.0.2.tgz", + "integrity": "sha512-Ca4LSXFFZUjPScRaqOcFxneA0VpKZr4MMYCljyQr4LIewTLb3Y0IUTIsnBBsVubIeEfxeSZpSjSsRM8APEQaAw==", + "dependencies": { + "inherits": "^2.0.1", + "stream-splicer": "^2.0.0" + } + }, + "node_modules/lazystream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.0.tgz", + "integrity": "sha1-9plf4PggOS9hOWvolGJAe7dxaOQ=", + "dependencies": { + "readable-stream": "^2.0.5" + }, + "engines": { + "node": ">= 0.6.3" + } + }, + "node_modules/lcid": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", + "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", + "dependencies": { + "invert-kv": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/linkify-it": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", + "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==", + "dependencies": { + "uc.micro": "^1.0.1" + 
} + }, + "node_modules/lodash": { + "version": "4.17.19", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", + "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" + }, + "node_modules/lodash.get": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", + "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=" + }, + "node_modules/lodash.includes": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", + "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", + "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=" + }, + "node_modules/lodash.isequal": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", + "integrity": "sha1-QVxEePK8wwEgwizhDtMib30+GOA=" + }, + "node_modules/lodash.isinteger": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", + "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=" + }, + "node_modules/lodash.isnumber": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", + "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" + }, + "node_modules/lodash.memoize": { + "version": "3.0.4", + "resolved": 
"https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-3.0.4.tgz", + "integrity": "sha1-LcvSwofLwKVcxCMovQxzYVDVPj8=" + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", + "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=" + }, + "node_modules/lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lru-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/lru-queue/-/lru-queue-0.1.0.tgz", + "integrity": "sha1-Jzi9nw089PhEkMVzbEhpmsYyzaM=", + "dependencies": { + "es5-ext": "~0.10.2" + } + }, + "node_modules/map-cache": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", + "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/map-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", + "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", + "dependencies": { + "object-visit": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/markdown-it": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz", + "integrity": "sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==", + "dependencies": { + "argparse": "^1.0.7", + "entities": "~1.1.1", + "linkify-it": "^2.0.0", + "mdurl": "^1.0.1", + "uc.micro": "^1.0.5" + }, + "bin": { + "markdown-it": "bin/markdown-it.js" + } + }, + "node_modules/markdown-it-anchor": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz", + "integrity": 
"sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==", + "peerDependencies": { + "markdown-it": "*" + } + }, + "node_modules/markdown-it/node_modules/entities": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", + "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" + }, + "node_modules/marked": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/marked/-/marked-1.1.1.tgz", + "integrity": "sha512-mJzT8D2yPxoPh7h0UXkB+dBj4FykPJ2OIfxAWeIHrvoHDkFxukV/29QxoFQoPM6RLEwhIFdJpmKBlqVM3s2ZIw==", + "bin": { + "marked": "bin/marked" + }, + "engines": { + "node": ">= 8.16.2" + } + }, + "node_modules/math-random": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.4.tgz", + "integrity": "sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A==" + }, + "node_modules/md5.js": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", + "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/mdurl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", + "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4=" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memoizee": { + "version": "0.3.10", + "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.3.10.tgz", + "integrity": "sha1-TsoNiu057J0Bf0xcLy9kMvQuXI8=", + "dependencies": { + "d": "~0.1.1", + "es5-ext": "~0.10.11", + "es6-weak-map": "~0.1.4", 
+ "event-emitter": "~0.3.4", + "lru-queue": "0.1", + "next-tick": "~0.2.2", + "timers-ext": "0.1" + } + }, + "node_modules/memoizee/node_modules/next-tick": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-0.2.2.tgz", + "integrity": "sha1-ddpKkn7liH45BliABltzNkE7MQ0=" + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" + }, + "node_modules/method-override": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/method-override/-/method-override-2.3.10.tgz", + "integrity": "sha1-49r41d7hDdLc59SuiNYrvud0drQ=", + "dependencies": { + "debug": "2.6.9", + "methods": "~1.1.2", + "parseurl": "~1.3.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/method-override/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/method-override/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromatch": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", + "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", + "dependencies": { + "arr-diff": "^2.0.0", + "array-unique": "^0.2.1", + "braces": "^1.8.2", + "expand-brackets": "^0.1.4", + "extglob": "^0.3.1", + "filename-regex": "^2.0.0", + "is-extglob": "^1.0.0", + "is-glob": 
"^2.0.1", + "kind-of": "^3.0.2", + "normalize-path": "^2.0.1", + "object.omit": "^2.0.0", + "parse-glob": "^3.0.4", + "regex-cache": "^0.4.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/miller-rabin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", + "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "dependencies": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + }, + "bin": { + "miller-rabin": "bin/miller-rabin" + } + }, + "node_modules/miller-rabin/node_modules/bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", + "dependencies": { + "mime-db": "1.44.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": 
"sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" + }, + "node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" + }, + "node_modules/minimize": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/minimize/-/minimize-2.0.0.tgz", + "integrity": "sha1-emssOzrVlkidDUTX/QpvPi3krT8=", + "dependencies": { + "argh": "~0.1.4", + "async": "~2.0.0-rc.6", + "cli-color": "~1.1.0", + "diagnostics": "~1.0.1", + "emits": "~3.0.0", + "htmlparser2": "~3.9.1", + "node-uuid": "~1.4.7" + }, + "bin": { + "minimize": "bin/minimize" + } + }, + "node_modules/minimize/node_modules/async": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/async/-/async-2.0.1.tgz", + "integrity": "sha1-twnMAoCpw28J9FNr6CPIOKkEniU=", + "dependencies": { + "lodash": "^4.8.0" + } + }, + "node_modules/minimize/node_modules/domelementtype": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", + "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" + }, + "node_modules/minimize/node_modules/domhandler": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", + "integrity": 
"sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", + "dependencies": { + "domelementtype": "1" + } + }, + "node_modules/minimize/node_modules/domutils": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", + "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", + "dependencies": { + "dom-serializer": "0", + "domelementtype": "1" + } + }, + "node_modules/minimize/node_modules/entities": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", + "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" + }, + "node_modules/minimize/node_modules/htmlparser2": { + "version": "3.9.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.9.2.tgz", + "integrity": "sha1-G9+HrMoPP55T+k/M6w9LTLsAszg=", + "dependencies": { + "domelementtype": "^1.3.0", + "domhandler": "^2.3.0", + "domutils": "^1.5.1", + "entities": "^1.1.1", + "inherits": "^2.0.1", + "readable-stream": "^2.0.2" + } + }, + "node_modules/minimize/node_modules/node-uuid": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/node-uuid/-/node-uuid-1.4.8.tgz", + "integrity": "sha1-sEDrCSOWivq/jTL7HxfxFn/auQc=", + "deprecated": "Use uuid module instead", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/mixin-deep": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", + "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "dependencies": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/mixin-deep/node_modules/is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": 
"sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dependencies": { + "is-plain-object": "^2.0.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "deprecated": "Legacy versions of mkdirp are no longer supported. Please update to mkdirp 1.x. (Note that the API surface has changed to use Promises in 1.x.)", + "dependencies": { + "minimist": "0.0.8" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/mocha": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz", + "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==", + "dev": true, + "dependencies": { + "browser-stdout": "1.3.1", + "commander": "2.15.1", + "debug": "3.1.0", + "diff": "3.5.0", + "escape-string-regexp": "1.0.5", + "glob": "7.1.2", + "growl": "1.10.5", + "he": "1.1.1", + "minimatch": "3.0.4", + "mkdirp": "0.5.1", + "supports-color": "5.4.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/mocha/node_modules/debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/mocha/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "node_modules/module-deps": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/module-deps/-/module-deps-6.2.3.tgz", + "integrity": 
"sha512-fg7OZaQBcL4/L+AK5f4iVqf9OMbCclXfy/znXRxTVhJSeW5AIlS9AwheYwDaXM3lVW7OBeaeUEY3gbaC6cLlSA==", + "dependencies": { + "browser-resolve": "^2.0.0", + "cached-path-relative": "^1.0.2", + "concat-stream": "~1.6.0", + "defined": "^1.0.0", + "detective": "^5.2.0", + "duplexer2": "^0.1.2", + "inherits": "^2.0.1", + "JSONStream": "^1.0.3", + "parents": "^1.0.0", + "readable-stream": "^2.0.2", + "resolve": "^1.4.0", + "stream-combiner2": "^1.1.1", + "subarg": "^1.0.0", + "through2": "^2.0.0", + "xtend": "^4.0.0" + }, + "bin": { + "module-deps": "bin/cmd.js" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/module-deps/node_modules/browser-resolve": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-2.0.0.tgz", + "integrity": "sha512-7sWsQlYL2rGLy2IWm8WL8DCTJvYLc/qlOnsakDac87SOoCd16WLsaAMdCiAqsTNHIe+SXfaqyxyo6THoWqs8WQ==", + "dependencies": { + "resolve": "^1.17.0" + } + }, + "node_modules/moment": { + "version": "2.27.0", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.27.0.tgz", + "integrity": "sha512-al0MUK7cpIcglMv3YF13qSgdAIqxHTO7brRtaz3DlSULbqfazqkc5kEjNrLDOM7fsjshoFIihnU8snrP7zUvhQ==", + "engines": { + "node": "*" + } + }, + "node_modules/mongo-uri": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/mongo-uri/-/mongo-uri-0.1.2.tgz", + "integrity": "sha1-FzrwFAMzkALgq9C01nWYfTzc+Z4=", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/mongodb": { + "version": "2.2.35", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-2.2.35.tgz", + "integrity": "sha512-3HGLucDg/8EeYMin3k+nFWChTA85hcYDCw1lPsWR6yV9A6RgKb24BkLiZ9ySZR+S0nfBjWoIUS7cyV6ceGx5Gg==", + "dependencies": { + "es6-promise": "3.2.1", + "mongodb-core": "2.1.19", + "readable-stream": "2.2.7" + }, + "engines": { + "node": ">=0.10.3" + } + }, + "node_modules/mongodb-core": { + "version": "2.1.19", + "resolved": "https://registry.npmjs.org/mongodb-core/-/mongodb-core-2.1.19.tgz", + "integrity": 
"sha512-Jt4AtWUkpuW03kRdYGxga4O65O1UHlFfvvInslEfLlGi+zDMxbBe3J2NVmN9qPJ957Mn6Iz0UpMtV80cmxCVxw==", + "dependencies": { + "bson": "~1.0.4", + "require_optional": "~1.0.0" + } + }, + "node_modules/mongodb/node_modules/process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=" + }, + "node_modules/mongodb/node_modules/readable-stream": { + "version": "2.2.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.2.7.tgz", + "integrity": "sha1-BwV6y+JGeyIELTb5jFrVBwVOlbE=", + "dependencies": { + "buffer-shims": "~1.0.0", + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "~1.0.0", + "process-nextick-args": "~1.0.6", + "string_decoder": "~1.0.0", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/mongodb/node_modules/string_decoder": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.0.3.tgz", + "integrity": "sha512-4AH6Z5fzNNBcH+6XDMfA/BTt87skxqJlO0lAh3Dker5zThcAxG6mKz+iGu308UKoPPQ8Dcqx/4JhujzltRa+hQ==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/msgpack-js": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/msgpack-js/-/msgpack-js-0.3.0.tgz", + "integrity": "sha1-Aw7AjFlW+cp9F9QKVy1Tlv7BCSM=", + "dependencies": { + "bops": "~0.0.6" + } + }, + "node_modules/multiparty": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/multiparty/-/multiparty-4.1.4.tgz", + "integrity": "sha1-TJbcvcEeP3kX4WFeZAtLUCK+ZP0=", + "dependencies": { + "fd-slicer": "~1.0.1", + "safe-buffer": "5.1.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/nan": { + "version": 
"2.14.1", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", + "integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==", + "optional": true + }, + "node_modules/nanomatch": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", + "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "dependencies": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "fragment-cache": "^0.2.1", + "is-windows": "^1.0.2", + "kind-of": "^6.0.2", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanomatch/node_modules/arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanomatch/node_modules/array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nanomatch/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ncp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ncp/-/ncp-2.0.0.tgz", + "integrity": "sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=", + "bin": { + "ncp": "bin/ncp" + } + }, + "node_modules/negotiator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", + "integrity": 
"sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/next-tick": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", + "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" + }, + "node_modules/node-gyp-build": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", + "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==", + "bin": { + "node-gyp-build": "bin.js", + "node-gyp-build-optional": "optional.js", + "node-gyp-build-test": "build-test.js" + } + }, + "node_modules/nodemailer": { + "version": "6.4.11", + "resolved": "https://registry.npmjs.org/nodemailer/-/nodemailer-6.4.11.tgz", + "integrity": "sha512-BVZBDi+aJV4O38rxsUh164Dk1NCqgh6Cm0rQSb9SK/DHGll/DrCMnycVDD7msJgZCnmVa8ASo8EZzR7jsgTukQ==", + "hasInstallScript": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dependencies": { + "remove-trailing-separator": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/notepack.io": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-2.1.3.tgz", + "integrity": "sha512-AgSt+cP5XMooho1Ppn8NB3FFaVWefV+qZoZncYTUSch2GAEwlYLcIIbT5YVkMlFeNHnfwOvc4HDlbvrB5BRxXA==" + }, + "node_modules/number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nunjucks": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/nunjucks/-/nunjucks-2.4.3.tgz", + "integrity": 
"sha1-lhzLDzGABI7ptpzLboLBy5ReXs0=", + "dependencies": { + "asap": "^2.0.3", + "chokidar": "^1.0.0", + "yargs": "^3.32.0" + }, + "bin": { + "nunjucks-precompile": "bin/precompile" + }, + "engines": { + "node": "*" + } + }, + "node_modules/nunjucks-markdown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/nunjucks-markdown/-/nunjucks-markdown-2.0.1.tgz", + "integrity": "sha1-1V51Qzo1hQ4sNFZR/j+THtmxVqI=", + "peerDependencies": { + "nunjucks": "^2.3.0 || ^3.0.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-component": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz", + "integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE=" + }, + "node_modules/object-copy": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", + "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", + "dependencies": { + "copy-descriptor": "^0.1.0", + "define-property": "^0.2.5", + "kind-of": "^3.0.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-copy/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-visit": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", + "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", + "dependencies": { + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-visit/node_modules/isobject": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object.omit": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", + "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", + "dependencies": { + "for-own": "^0.1.4", + "is-extendable": "^0.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object.pick": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", + "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object.pick/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/on-finished": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", + "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/ono": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/ono/-/ono-4.0.11.tgz", + "integrity": "sha512-jQ31cORBFE6td25deYeD80wxKBMj+zBmHTrVxnc6CKhx8gho6ipmWM5zj/oeoqioZ99yqBls9Z/9Nss7J26G2g==", + 
"dependencies": { + "format-util": "^1.0.3" + } + }, + "node_modules/os-browserify": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", + "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" + }, + "node_modules/os-locale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", + "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", + "dependencies": { + "lcid": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/os-name": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/os-name/-/os-name-1.0.3.tgz", + "integrity": "sha1-GzefZINa98Wn9JizV8uVIVwVnt8=", + "dependencies": { + "osx-release": "^1.0.0", + "win-release": "^1.0.0" + }, + "bin": { + "os-name": "cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/osx-release": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/osx-release/-/osx-release-1.1.0.tgz", + "integrity": "sha1-8heRGigTaUmvG/kwiyQeJzfTzWw=", + "dependencies": { + "minimist": "^1.1.0" + }, + "bin": { + "osx-release": "cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/osx-release/node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + }, + "node_modules/pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" + }, + "node_modules/parents": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parents/-/parents-1.0.1.tgz", + "integrity": "sha1-/t1NK/GTp3dF/nHjcdc8MwfZx1E=", + "dependencies": { + "path-platform": "~0.11.15" + } + }, + "node_modules/parse-asn1": { + "version": "5.1.5", + "resolved": 
"https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz", + "integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==", + "dependencies": { + "asn1.js": "^4.0.0", + "browserify-aes": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.0", + "pbkdf2": "^3.0.3", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/parse-glob": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", + "integrity": "sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", + "dependencies": { + "glob-base": "^0.3.0", + "is-dotfile": "^1.0.0", + "is-extglob": "^1.0.0", + "is-glob": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/parse-srcset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/parse-srcset/-/parse-srcset-1.0.2.tgz", + "integrity": "sha1-8r0iH2zJcKk42IVWq8WJyqqiveE=" + }, + "node_modules/parseqs": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz", + "integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=", + "dependencies": { + "better-assert": "~1.0.0" + } + }, + "node_modules/parseuri": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz", + "integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=", + "dependencies": { + "better-assert": "~1.0.0" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascalcase": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", + "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-browserify": { + "version": "0.0.1", + "resolved": 
"https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", + "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==" + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-parse": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + }, + "node_modules/path-platform": { + "version": "0.11.15", + "resolved": "https://registry.npmjs.org/path-platform/-/path-platform-0.11.15.tgz", + "integrity": "sha1-6GQhf3TDaFDwhSt43Hv31KVyG/I=", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" + }, + "node_modules/pbkdf2": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.1.tgz", + "integrity": "sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg==", + "dependencies": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + "ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=" + }, + "node_modules/pluralize": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-1.1.6.tgz", + "integrity": "sha1-5L9dazayr8IsgB98Nyk2F+C9xW0=" + }, + "node_modules/posix-character-classes": { + "version": "0.1.1", + "resolved": 
"https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", + "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postcss": { + "version": "7.0.32", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.32.tgz", + "integrity": "sha512-03eXong5NLnNCD05xscnGKGDZ98CyzoqPSMjOe6SuoQY7Z2hIj0Ld1g/O/UQRuOle2aRtiIRDg9tDcTGAkLfKw==", + "dependencies": { + "chalk": "^2.4.2", + "source-map": "^0.6.1", + "supports-color": "^6.1.0" + }, + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + } + }, + "node_modules/postcss/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postcss/node_modules/supports-color": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/prepend-http": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", + "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/preserve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", + "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", + "engines": { + 
"node": ">= 0.6.0" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/proxy-addr": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", + "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", + "dependencies": { + "forwarded": "~0.1.2", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/public-encrypt": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", + "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "dependencies": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "node_modules/public-encrypt/node_modules/bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + }, + "node_modules/punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" + }, + "node_modules/q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", + "engines": { + "node": ">=0.6.0", + "teleport": ">=0.2.0" + } + }, + "node_modules/qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": 
"sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", + "deprecated": "The querystring API is considered Legacy. new code should use the URLSearchParams API instead.", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/querystring-es3": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", + "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/raml-jsonschema-expander": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/raml-jsonschema-expander/-/raml-jsonschema-expander-1.1.2.tgz", + "integrity": "sha1-nFagwcCpGQjUxBuiVu/EnryG3Uw=", + "dependencies": { + "urllib-sync": "^1.1.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/raml-parser": { + "version": "0.8.18", + "resolved": "https://registry.npmjs.org/raml-parser/-/raml-parser-0.8.18.tgz", + "integrity": "sha1-CHM3UDT4uKHSDBDjFHtiebM0l6g=", + "dependencies": { + "got": "~2.4.0", + "jju": "~1.2.0", + "json-schema-ref-parser": "^3.1.2", + "object-assign": "^4.1.0", + "pluralize": "~1.1.1", + "q": "0.9.7", + "uritemplate": "~0.3.4" + }, + "bin": { + "raml-parser": "bin/raml-parser" + } + }, + "node_modules/raml-parser/node_modules/q": { + "version": "0.9.7", + "resolved": "https://registry.npmjs.org/q/-/q-0.9.7.tgz", + "integrity": "sha1-TeLmyzspCIyeTLwDv51C+5bOL3U=", + "engines": { + "node": ">=0.6.0", + "teleport": ">=0.2.0" + } + }, + "node_modules/raml2html": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/raml2html/-/raml2html-3.0.1.tgz", + "integrity": "sha1-aqeaoJg+o8SgfwF3UceBOCmmUhg=", + "dependencies": { + "commander": "2.9.x", + "marked": "0.3.x", + "minimize": 
"2.0.x", + "nunjucks": "2.4.x", + "nunjucks-markdown": "2.0.x", + "raml-jsonschema-expander": "1.1.x", + "raml2obj": "3.0.0" + }, + "bin": { + "raml2html": "bin/raml2html" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/raml2html/node_modules/commander": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.9.0.tgz", + "integrity": "sha1-nJkJQXbhIkDLItbFFGCYQA/g99Q=", + "dependencies": { + "graceful-readlink": ">= 1.0.0" + }, + "engines": { + "node": ">= 0.6.x" + } + }, + "node_modules/raml2html/node_modules/marked": { + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/marked/-/marked-0.3.19.tgz", + "integrity": "sha512-ea2eGWOqNxPcXv8dyERdSr/6FmzvWwzjMxpfGB/sbMccXoct+xY+YukPD+QTUZwyvK7BZwcr4m21WBOW41pAkg==", + "bin": { + "marked": "bin/marked" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/raml2obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/raml2obj/-/raml2obj-3.0.0.tgz", + "integrity": "sha1-9w9XPGci4osw1C/bdUOkhoWR678=", + "dependencies": { + "raml-parser": "0.8.x" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/randomatic": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.1.tgz", + "integrity": "sha512-TuDE5KxZ0J461RVjrJZCJc+J+zCkTb1MbH9AQUq68sMhOMcy9jLcb3BrZKgp9q9Ncltdg4QVqWrH02W2EFFVYw==", + "dependencies": { + "is-number": "^4.0.0", + "kind-of": "^6.0.0", + "math-random": "^1.0.1" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/randomatic/node_modules/is-number": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", + "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/randomatic/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": 
"sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/randomfill": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", + "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "dependencies": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz", + "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==", + "dependencies": { + "bytes": "3.0.0", + "http-errors": "1.6.3", + "iconv-lite": "0.4.23", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/read-all-stream": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/read-all-stream/-/read-all-stream-1.0.2.tgz", + "integrity": "sha1-03jPTvbiNrGI6kLRNeWxgKiePpI=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/read-only-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-only-stream/-/read-only-stream-2.0.0.tgz", + "integrity": "sha1-JyT9aoET1zdkrCiNQ4YnDB2/F/A=", + "dependencies": { + "readable-stream": "^2.0.2" + } + }, + 
"node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readdirp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", + "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", + "dependencies": { + "graceful-fs": "^4.1.11", + "micromatch": "^3.1.10", + "readable-stream": "^2.0.2" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/readdirp/node_modules/arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/braces": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "dependencies": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/readdirp/node_modules/braces/node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", + "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", + "dependencies": { + "debug": "^2.3.3", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "posix-character-classes": "^0.1.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/is-accessor-descriptor": { + "version": "0.1.6", + "resolved": 
"https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/is-accessor-descriptor/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/is-data-descriptor/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "dependencies": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/expand-brackets/node_modules/kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": 
"sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/extglob": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", + "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "dependencies": { + "array-unique": "^0.3.2", + "define-property": "^1.0.0", + "expand-brackets": "^2.1.4", + "extend-shallow": "^2.0.1", + "fragment-cache": "^0.2.1", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/extglob/node_modules/define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dependencies": { + "is-descriptor": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/extglob/node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", + "dependencies": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/fill-range/node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", 
+ "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/is-number/node_modules/kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dependencies": { + "is-buffer": "^1.1.5" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/isobject": { + "version": "3.0.1", + 
"resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "dependencies": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/redis": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz", + "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==", + "dependencies": { + "double-ended-queue": "^2.1.0-0", + "redis-commands": "^1.2.0", + "redis-parser": "^2.6.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/redis-commands": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.6.0.tgz", + "integrity": "sha512-2jnZ0IkjZxvguITjFTrGiLyzQZcTvaw8DAaCXxZq/dsHXz7KfMQ3OUJy7Tz9vnRtZRVz6VRCPDvruvU8Ts44wQ==" + }, + 
"node_modules/redis-parser": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", + "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/regex-cache": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", + "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", + "dependencies": { + "is-equal-shallow": "^0.1.3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/regex-not": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", + "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "dependencies": { + "extend-shallow": "^3.0.2", + "safe-regex": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" + }, + "node_modules/repeat-element": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", + "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/require_optional": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/require_optional/-/require_optional-1.0.1.tgz", + "integrity": "sha512-qhM/y57enGWHAe3v/NcwML6a3/vfESLe/sGM2dII+gEO0BpKRUkWZow/tyloNqJyN6kXSl3RyyM8Ll5D/sJP8g==", + "dependencies": { 
+ "resolve-from": "^2.0.0", + "semver": "^5.1.0" + } + }, + "node_modules/require_optional/node_modules/resolve-from": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-2.0.0.tgz", + "integrity": "sha1-lICrIOlP+h2egKgEx+oUdhGWa1c=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-uncached": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/require-uncached/-/require-uncached-1.0.3.tgz", + "integrity": "sha1-Tg1W1slmL9MeQwEcS5WqSZVUIdM=", + "dependencies": { + "caller-path": "^0.1.0", + "resolve-from": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requirejs": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/requirejs/-/requirejs-2.3.5.tgz", + "integrity": "sha512-svnO+aNcR/an9Dpi44C7KSAy5fFGLtmPbaaCeQaklUz8BQhS64tWWIIlvEA5jrWICzlO/X9KSzSeXFnZdBu8nw==", + "bin": { + "r_js": "bin/r.js", + "r.js": "bin/r.js" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/requirejs-text": { + "version": "2.0.15", + "resolved": "https://registry.npmjs.org/requirejs-text/-/requirejs-text-2.0.15.tgz", + "integrity": "sha1-ExOHM2E/xEV7fhJH6Mt1HfeqVCk=" + }, + "node_modules/requizzle": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.3.tgz", + "integrity": "sha512-YanoyJjykPxGHii0fZP0uUPEXpvqfBDxWV7s6GKAiiOsiqhX6vHNyW3Qzdmqp/iq/ExbhaGbVrjB4ruEVSM4GQ==", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/resolve": { + "version": "1.17.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz", + "integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==", + "dependencies": { + "path-parse": "^1.0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-1.0.1.tgz", + "integrity": 
"sha1-Jsv+k10a7uq7Kbw/5a6wHpPUQiY=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-url": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", + "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=", + "deprecated": "https://github.com/lydell/resolve-url#deprecated" + }, + "node_modules/ret": { + "version": "0.1.15", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", + "engines": { + "node": ">=0.12" + } + }, + "node_modules/rimraf": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", + "integrity": "sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w==", + "dependencies": { + "glob": "^7.0.5" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ripemd160": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", + "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "dependencies": { 
+ "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/safe-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", + "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", + "dependencies": { + "ret": "~0.1.10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sanitize-html": { + "version": "1.27.2", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-1.27.2.tgz", + "integrity": "sha512-REZETvhFFChM3zyQS8XoR02j5U56HtyQkxsc8cb5HEi3XU0AAX9TuKvWe3ESR0F0IA81ZghA+5YpJg8C35AFyQ==", + "dependencies": { + "htmlparser2": "^4.1.0", + "lodash": "^4.17.15", + "parse-srcset": "^1.0.2", + "postcss": "^7.0.27" + } + }, + "node_modules/sax": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" + }, + "node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/send": { + "version": "0.16.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", + "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", + "dependencies": { + "debug": "2.6.9", + "depd": "~1.1.2", + "destroy": "~1.0.4", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": 
"~1.8.1", + "fresh": "0.5.2", + "http-errors": "~1.6.2", + "mime": "1.4.1", + "ms": "2.0.0", + "on-finished": "~2.3.0", + "range-parser": "~1.2.0", + "statuses": "~1.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/mime": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", + "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==", + "bin": { + "mime": "cli.js" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/send/node_modules/statuses": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", + "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.2", + "send": "0.16.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-value": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", + "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "dependencies": { + "extend-shallow": "^2.0.1", + 
"is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/set-value/node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "bin": { + "sha.js": "bin.js" + } + }, + "node_modules/shasum": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/shasum/-/shasum-1.0.2.tgz", + "integrity": "sha1-5wEjENj0F/TetXEhUOVni4euVl8=", + "dependencies": { + "json-stable-stringify": "~0.0.0", + "sha.js": "~2.4.4" + } + }, + "node_modules/shasum-object": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shasum-object/-/shasum-object-1.0.0.tgz", + "integrity": "sha512-Iqo5rp/3xVi6M4YheapzZhhGPVs0yZwHj7wvwQ1B9z8H6zk+FEnI7y3Teq7qwnekfEhu8WmG2z0z4iWZaxLWVg==", + "dependencies": { + "fast-safe-stringify": "^2.0.7" + } + }, + "node_modules/shell-quote": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz", + "integrity": "sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==" + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/snapdragon": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", + "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "dependencies": { + "base": "^0.11.1", + "debug": "^2.2.0", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "map-cache": "^0.2.2", + "source-map": "^0.5.6", + "source-map-resolve": "^0.5.0", + "use": "^3.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", + "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "dependencies": { + "define-property": "^1.0.0", + "isobject": "^3.0.0", + "snapdragon-util": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dependencies": { + "is-descriptor": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + 
"dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dependencies": { + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dependencies": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-node/node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon-util": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", + "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "dependencies": { + "kind-of": "^3.2.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/snapdragon/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon/node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/snapdragon/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/socket.io": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.1.1.tgz", + "integrity": "sha512-rORqq9c+7W0DAK3cleWNSyfv/qKXV99hV4tZe+gGLfBECw3XEhBy7x85F3wypA9688LKjtwO9pX9L33/xQI8yA==", + "dependencies": { + "debug": "~3.1.0", + "engine.io": "~3.2.0", + "has-binary2": "~1.0.2", + "socket.io-adapter": "~1.1.0", + "socket.io-client": "2.1.1", + "socket.io-parser": "~3.2.0" + } + }, + "node_modules/socket.io-adapter": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.2.tgz", + "integrity": "sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g==" + }, + "node_modules/socket.io-client": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.1.1.tgz", + "integrity": "sha512-jxnFyhAuFxYfjqIgduQlhzqTcOEQSn+OHKVfAxWaNWa7ecP7xSNk2Dx/3UEsDcY7NcFafxvNvKPmmO7HTwTxGQ==", + "dependencies": { + "backo2": "1.0.2", + 
"base64-arraybuffer": "0.1.5", + "component-bind": "1.0.0", + "component-emitter": "1.2.1", + "debug": "~3.1.0", + "engine.io-client": "~3.2.0", + "has-binary2": "~1.0.2", + "has-cors": "1.1.0", + "indexof": "0.0.1", + "object-component": "0.0.3", + "parseqs": "0.0.5", + "parseuri": "0.0.5", + "socket.io-parser": "~3.2.0", + "to-array": "0.1.4" + } + }, + "node_modules/socket.io-client/node_modules/component-emitter": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" + }, + "node_modules/socket.io-parser": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.2.0.tgz", + "integrity": "sha512-FYiBx7rc/KORMJlgsXysflWx/RIvtqZbyGLlHZvjfmPTPeuD/I8MaW7cfFrj5tRltICJdgwflhfZ3NVVbVLFQA==", + "dependencies": { + "component-emitter": "1.2.1", + "debug": "~3.1.0", + "isarray": "2.0.1" + } + }, + "node_modules/socket.io-parser/node_modules/component-emitter": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" + }, + "node_modules/socket.io-parser/node_modules/isarray": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz", + "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=" + }, + "node_modules/socket.io-redis": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/socket.io-redis/-/socket.io-redis-5.2.0.tgz", + "integrity": "sha1-j+KtlEX8UIhvtwq8dZ1nQD1Ymd8=", + "dependencies": { + "debug": "~2.6.8", + "notepack.io": "~2.1.2", + "redis": "~2.8.0", + "socket.io-adapter": "~1.1.0", + "uid2": "0.0.3" + } + }, + "node_modules/socket.io-redis/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/socket.io-redis/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "dependencies": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + }, + "node_modules/source-map-url": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", + "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=" + }, + "node_modules/split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "dependencies": { + "extend-shallow": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" + }, + "node_modules/stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=", + "engines": { + "node": "*" + } + }, + "node_modules/static-extend": { + 
"version": "0.1.2", + "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", + "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", + "dependencies": { + "define-property": "^0.2.5", + "object-copy": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/static-extend/node_modules/define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dependencies": { + "is-descriptor": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/stream-browserify": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", + "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", + "dependencies": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "node_modules/stream-combiner2": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz", + "integrity": "sha1-+02KFCDqNidk4hrUeAOXvry0HL4=", + "dependencies": { + "duplexer2": "~0.1.0", + "readable-stream": "^2.0.2" + } + }, + "node_modules/stream-http": { + "version": "2.8.3", + "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", + "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", + "dependencies": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.3.6", + "to-arraybuffer": "^1.0.0", + "xtend": "^4.0.0" + } + }, + "node_modules/stream-shift": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" + }, + "node_modules/stream-splicer": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/stream-splicer/-/stream-splicer-2.0.1.tgz", + "integrity": "sha512-Xizh4/NPuYSyAXyT7g8IvdJ9HJpxIGL9PjyhtywCZvvP0OPIdqyrr4dMikeuvY8xahpdKEBlBTySe583totajg==", + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.2" + } + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dependencies": { + "ansi-regex": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/subarg": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/subarg/-/subarg-1.0.0.tgz", + "integrity": 
"sha1-9izxdYHplrSPyWVpn1TAauJouNI=", + "dependencies": { + "minimist": "^1.1.0" + } + }, + "node_modules/subarg/node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + }, + "node_modules/superagent": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/superagent/-/superagent-3.8.3.tgz", + "integrity": "sha512-GLQtLMCoEIK4eDv6OGtkOoSMt3D+oq0y3dsxMuYuDvaNUvuT8eFBuLmfR0iYYzHC1e8hpzC6ZsxbuP6DIalMFA==", + "dependencies": { + "component-emitter": "^1.2.0", + "cookiejar": "^2.1.0", + "debug": "^3.1.0", + "extend": "^3.0.0", + "form-data": "^2.3.1", + "formidable": "^1.2.0", + "methods": "^1.1.1", + "mime": "^1.4.1", + "qs": "^6.5.1", + "readable-stream": "^2.3.5" + }, + "engines": { + "node": ">= 4.0" + } + }, + "node_modules/supports-color": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", + "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/syntax-error": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/syntax-error/-/syntax-error-1.4.0.tgz", + "integrity": "sha512-YPPlu67mdnHGTup2A8ff7BC2Pjq0e0Yp/IyTFN03zWO0RcK07uLcbi7C2KpGR2FvWbaB0+bfE27a+sBKebSo7w==", + "dependencies": { + "acorn-node": "^1.2.0" + } + }, + "node_modules/taffydb": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/taffydb/-/taffydb-2.6.2.tgz", + "integrity": "sha1-fLy2S1oUG2ou/CxdLGe04VCyomg=" + }, + "node_modules/tar-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", + "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", + "dependencies": 
{ + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/text-hex": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-0.0.0.tgz", + "integrity": "sha1-V4+8haapJjbkLdF7QdAhjM6esrM=" + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" + }, + "node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/timed-out": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-2.0.0.tgz", + "integrity": "sha1-84sK6B03R9YoAB9B2vxlKs5nHAo=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/timers-browserify": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-1.4.2.tgz", + "integrity": "sha1-ycWLV1voQHN1y14kYtrO50NZ9B0=", + "dependencies": { + "process": "~0.11.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/timers-ext": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.7.tgz", + "integrity": "sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==", + "dependencies": { + "es5-ext": "~0.10.46", + "next-tick": "1" + } + }, + "node_modules/to-array": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/to-array/-/to-array-0.1.4.tgz", + "integrity": "sha1-F+bBH3PdTz10zaek/zI46a2b+JA=" + }, + "node_modules/to-arraybuffer": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", + "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=" + }, + "node_modules/to-buffer": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", + "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" + }, + "node_modules/to-object-path": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", + "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-regex": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", + "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "dependencies": { + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "regex-not": "^1.0.2", + "safe-regex": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", + "dependencies": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-regex-range/node_modules/is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dependencies": { + "kind-of": "^3.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-utf8": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/to-utf8/-/to-utf8-0.0.1.tgz", + "integrity": "sha1-0Xrqcv8vujm55DYBvns/9y4ImFI=" + }, + "node_modules/toidentifier": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", + "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tty-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.1.tgz", + "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==" + }, + "node_modules/type": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", + "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" + }, + "node_modules/type-detect": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-1.0.0.tgz", + "integrity": "sha1-diIXzAbbJY7EiQihKY6LlRIejqI=", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" + }, + "node_modules/uc.micro": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", + "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" + }, + "node_modules/uid2": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/uid2/-/uid2-0.0.3.tgz", + "integrity": "sha1-SDEm4Rd03y9xuLY53NeZw3YWK4I=" + }, + "node_modules/ultron": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz", + "integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og==" + }, + "node_modules/umd": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/umd/-/umd-3.0.3.tgz", + "integrity": "sha512-4IcGSufhFshvLNcMCV80UnQVlZ5pMOC8mvNPForqwA4+lzYQuetTESLDQkeLmihq8bRcnpbQa48Wb8Lh16/xow==", + "bin": { + "umd": "bin/cli.js" + } + }, + "node_modules/undeclared-identifiers": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/undeclared-identifiers/-/undeclared-identifiers-1.1.3.tgz", + "integrity": "sha512-pJOW4nxjlmfwKApE4zvxLScM/njmwj/DiUBv7EabwE4O8kRUy+HIwxQtZLBPll/jx1LJyBcqNfB3/cpv9EZwOw==", + "dependencies": { + "acorn-node": "^1.3.0", + "dash-ast": "^1.0.0", + "get-assigned-identifiers": "^1.2.0", + "simple-concat": "^1.0.0", + "xtend": "^4.0.1" + }, + "bin": { + "undeclared-identifiers": "bin.js" + } + }, + "node_modules/underscore": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.9.1.tgz", + "integrity": "sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg==" + }, + "node_modules/union-value": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", + "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", + "dependencies": { + "arr-union": "^3.1.0", + "get-value": "^2.0.6", + "is-extendable": "^0.1.1", + "set-value": "^2.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/unset-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", + "integrity": 
"sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", + "dependencies": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/has-value": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", + "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", + "dependencies": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/has-value/node_modules/isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dependencies": { + "isarray": "1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/has-values": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", + "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unset-value/node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/uritemplate": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/uritemplate/-/uritemplate-0.3.4.tgz", + "integrity": "sha1-BdCoU/+8iw9Jqj1NKtd3sNHuBww=" + }, + "node_modules/urix": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", + "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=", + "deprecated": "Please see https://github.com/lydell/urix#deprecated" + }, + "node_modules/url": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", + "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "dependencies": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + 
"node_modules/urllib": { + "version": "2.11.1", + "resolved": "https://registry.npmjs.org/urllib/-/urllib-2.11.1.tgz", + "integrity": "sha1-5F1Xnxu+Qsn64hzf9yVo88jIyUU=", + "dependencies": { + "any-promise": "^1.2.0", + "debug": "^2.2.0", + "default-user-agent": "^1.0.0", + "digest-header": "^0.0.1", + "humanize-ms": "^1.2.0", + "iconv-lite": "^0.4.13", + "media-typer": "^0.3.0", + "statuses": "^1.3.0" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/urllib-sync": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/urllib-sync/-/urllib-sync-1.1.4.tgz", + "integrity": "sha1-yRMI9JkaZe5iDWc85g/dJLvRjMo=", + "dependencies": { + "urllib": "~2.11.0", + "utility": "~1.7.1" + } + }, + "node_modules/urllib/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/urllib/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/use": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", + "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/util": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", + "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "dependencies": { + "inherits": "2.0.3" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "node_modules/utility": { + "version": "1.7.1", + 
"resolved": "https://registry.npmjs.org/utility/-/utility-1.7.1.tgz", + "integrity": "sha1-+3TN3IFqQRJ2ym6MqZMkfyPusKc=", + "dependencies": { + "copy-to": "~2.0.1", + "escape-html": "~1.0.3" + }, + "engines": { + "node": ">= 0.12.0" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.1.0.tgz", + "integrity": "sha512-DIWtzUkw04M4k3bf1IcpS2tngXEL26YUD2M0tMDUpnUrz2hgzUBlD55a4FjdLGPvfHxS6uluGWvaVEqgBcVa+g==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/validator": { + "version": "10.11.0", + "resolved": "https://registry.npmjs.org/validator/-/validator-10.11.0.tgz", + "integrity": "sha512-X/p3UZerAIsbBfN/IwahhYaBbY68EN/UQBWHtsbXGT5bfrH/p4NQzUCG1kF/rtKaNpnJ7jAu6NGTdSNtyNIXMw==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vm-browserify": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", + "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" + }, + "node_modules/webgme": { + "version": "2.42.0", + "resolved": "https://registry.npmjs.org/webgme/-/webgme-2.42.0.tgz", + "integrity": "sha512-QG6v+G4nERYyjstAw7V1h1gkIKW7DNrDLXedaQ3Bg6og3VGD74MujKm8V45YnqiDyq2a8kVu2uOjOKpKa42OvQ==", + "hasInstallScript": true, + "dependencies": { + "bower": 
"1.8.8", + "q": "1.5.1", + "require-uncached": "1.0.3", + "requirejs": "2.3.5", + "webgme-engine": "2.25.1", + "webgme-user-management-page": "^0.5.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/webgme-autoviz": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/webgme-autoviz/-/webgme-autoviz-2.2.1.tgz", + "integrity": "sha1-O7RGprOlAXCOsdf9upB3+M6wuvM=", + "peerDependencies": { + "webgme": "^2.0.0" + } + }, + "node_modules/webgme-easydag": { + "version": "1.2.0", + "resolved": "git+ssh://git@github.com/dfst/webgme-easydag.git#cb461f2687c8a2aa00adc827ea3688b7f0e24ada", + "integrity": "sha512-FGlGEizf/rG4PLNDt511YrEAOXkJh5EFwE++UZsCP07qb2fUfDjBErMHsj8Awi5g/NK2JcgFpMn3y/EDY2Ff4g==", + "peerDependencies": { + "webgme": "^2.6.0" + } + }, + "node_modules/webgme-engine": { + "version": "2.25.1", + "resolved": "https://registry.npmjs.org/webgme-engine/-/webgme-engine-2.25.1.tgz", + "integrity": "sha512-yZreptu5SR/esPLGdqij6YDgW35qm7SfqITGrCkMtW+XtSm9OH2eM/7prG/iQo+Ge2i6f+2YZmCmOLRnXlvC3A==", + "hasInstallScript": true, + "dependencies": { + "adm-zip": "0.4.11", + "agentkeepalive": "3.4.1", + "archiver": "2.1.1", + "aws-sdk": "2.260.1", + "bcryptjs": "2.4.3", + "body-parser": "1.18.3", + "browserify": "16.2.3", + "buffer-equal-constant-time": "1.0.1", + "chance": "1.0.16", + "commander": "2.15.1", + "compression": "1.7.2", + "connect-multiparty": "2.1.0", + "content-disposition": "0.5.2", + "cookie-parser": "1.4.3", + "debug": "3.1.0", + "ejs": "2.6.1", + "express": "4.16.3", + "ink-docstrap": "1.3.2", + "jsdoc": "3.6.3", + "jsonwebtoken": "8.3.0", + "marked": "^1.1.0", + "method-override": "2.3.10", + "mime": "^1.6.0", + "minimatch": "3.0.4", + "mongo-uri": "0.1.2", + "mongodb": "2.2.35", + "ncp": "2.0.0", + "nodemailer": "^6.4.6", + "q": "1.5.1", + "raml2html": "^3.0.1", + "redis": "2.8.0", + "require-uncached": "1.0.3", + "requirejs": "2.3.5", + "requirejs-text": "2.0.15", + "socket.io": "2.1.1", + "socket.io-client": "2.1.1", + 
"socket.io-redis": "5.2.0", + "superagent": "3.8.3", + "underscore": "1.9.1", + "webgme-ot": "0.0.16", + "webgme-rust-components": "github:webgme/webgme-rust-components", + "webgme-webhook-manager": "0.1.1", + "winston": "2.4.3" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/webgme-json-importer": { + "version": "1.0.0", + "resolved": "git+ssh://git@github.com/deepforge-dev/webgme-json-importer.git#6e60149e2ec5ef946b3749bf194c893a1e72a424", + "integrity": "sha512-TXoWmcTcvQaxcGtYYCE6xNAMPsbZRbtMDIdgcrGxssfelo+4kQ+cQBwdPVRbl6LrnU+PEJcHMNvLpJUZLU+/EQ==", + "peerDependencies": { + "webgme": "^2.23.0" + } + }, + "node_modules/webgme-ot": { + "version": "0.0.16", + "resolved": "https://registry.npmjs.org/webgme-ot/-/webgme-ot-0.0.16.tgz", + "integrity": "sha512-Aict9Ka1tDDXZ9mZ9BX/4F3AV/KVf1qSoxK0UHtfxM1sPuGr3a4nXAhnccl/3jbHEjSHNphn71lmfff6y3+HmA==" + }, + "node_modules/webgme-rust-components": { + "name": "webgme-rust", + "version": "0.1.0", + "resolved": "git+ssh://git@github.com/webgme/webgme-rust-components.git#ad446234b6c02fd722e7e454015857ee523fb172", + "integrity": "sha512-jS1rMUMLb7hXMhBheFxiTA+yLw068o3cheiFIH0vszWN8xGIjGXZn8YrzOmK2hkAu3HLF4lT7nuTYJ+uSZUtpA==" + }, + "node_modules/webgme-simple-nodes": { + "version": "2.1.3", + "resolved": "git+ssh://git@github.com/brollb/webgme-simple-nodes.git#2a9fc79c93efd55067ef7c2e1559f9b31b0f97e5", + "integrity": "sha512-WhK6thWo5fns7zuN1EUSgX0qShOMMPIS/AIgrRMagt0ZR7dWeW9wLYwydM3dQ5tqtvpNY2GP9BZuuSGLd3V6BA==", + "peerDependencies": { + "webgme": "^2.0.0" + } + }, + "node_modules/webgme-user-management-page": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/webgme-user-management-page/-/webgme-user-management-page-0.5.0.tgz", + "integrity": "sha512-sk/sYFTiVf5ntGoNi/ZdZQsufG5UasnZCjThZW0IsW8WeuD0Njak6gVQwMLOid5XX2nLNpAmjGKSrowF0YyoLw==", + "dependencies": { + "body-parser": "^1.19.0", + "ejs": "^2.7.4", + "express": "4.16.3" + } + }, + 
"node_modules/webgme-user-management-page/node_modules/body-parser": { + "version": "1.19.0", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", + "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", + "dependencies": { + "bytes": "3.1.0", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "~1.1.2", + "http-errors": "1.7.2", + "iconv-lite": "0.4.24", + "on-finished": "~2.3.0", + "qs": "6.7.0", + "raw-body": "2.4.0", + "type-is": "~1.6.17" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/webgme-user-management-page/node_modules/bytes": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", + "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/webgme-user-management-page/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/webgme-user-management-page/node_modules/ejs": { + "version": "2.7.4", + "resolved": "https://registry.npmjs.org/ejs/-/ejs-2.7.4.tgz", + "integrity": "sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA==", + "hasInstallScript": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/webgme-user-management-page/node_modules/http-errors": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", + "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.1", + "statuses": ">= 1.5.0 < 2", + "toidentifier": "1.0.0" + }, + 
"engines": { + "node": ">= 0.6" + } + }, + "node_modules/webgme-user-management-page/node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/webgme-user-management-page/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/webgme-user-management-page/node_modules/qs": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", + "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/webgme-user-management-page/node_modules/raw-body": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", + "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", + "dependencies": { + "bytes": "3.1.0", + "http-errors": "1.7.2", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/webgme-user-management-page/node_modules/setprototypeof": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", + "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" + }, + "node_modules/webgme-webhook-manager": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/webgme-webhook-manager/-/webgme-webhook-manager-0.1.1.tgz", + "integrity": "sha512-04uqNF6sNRenJoJwHaLDlPr3516HyghbVInyc0BcQ0OIlH3Je1+YnHYGyFcPzOyWCbKYumoeRCBNkRrsJHLUnw==", + "dependencies": { + "mongodb": 
"^2.1.18", + "msgpack-js": "^0.3.0", + "q": "^1.4.1", + "redis": "2.6.2", + "superagent": "^3.8.3" + } + }, + "node_modules/webgme-webhook-manager/node_modules/redis": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/redis/-/redis-2.6.2.tgz", + "integrity": "sha1-fMqwVjATrGGefdhMZRK4HT2FJXk=", + "dependencies": { + "double-ended-queue": "^2.1.0-0", + "redis-commands": "^1.2.0", + "redis-parser": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/win-release": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/win-release/-/win-release-1.1.1.tgz", + "integrity": "sha1-X6VeAr58qTTt/BJmVjLoSbcuUgk=", + "dependencies": { + "semver": "^5.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/window-size": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", + "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=", + "bin": { + "window-size": "cli.js" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/winston": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/winston/-/winston-2.4.3.tgz", + "integrity": "sha512-GYKuysPz2pxYAVJD2NPsDLP5Z79SDEzPm9/j4tCjkF/n89iBNGBMJcR+dMUqxgPNgoSs6fVygPi+Vl2oxIpBuw==", + "dependencies": { + "async": "~1.0.0", + "colors": "1.0.x", + "cycle": "1.0.x", + "eyes": "0.1.x", + "isstream": "0.1.x", + "stack-trace": "0.0.x" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/winston/node_modules/async": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async/-/async-1.0.0.tgz", + "integrity": "sha1-+PwEyjoTeErenhZBr5hXjPvWR6k=" + }, + "node_modules/wrap-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", + "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "dependencies": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": 
"1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "node_modules/ws": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz", + "integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==", + "dependencies": { + "async-limiter": "~1.0.0", + "safe-buffer": "~5.1.0", + "ultron": "~1.1.0" + } + }, + "node_modules/xml2js": { + "version": "0.4.17", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.17.tgz", + "integrity": "sha1-F76T6q4/O3eTWceVtBlwWogX6Gg=", + "dependencies": { + "sax": ">=0.6.0", + "xmlbuilder": "^4.1.0" + } + }, + "node_modules/xmlbuilder": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-4.2.1.tgz", + "integrity": "sha1-qlijBBoGb5DqoWwvU4n/GfP0YaU=", + "dependencies": { + "lodash": "^4.0.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/xmlcreate": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.3.tgz", + "integrity": "sha512-HgS+X6zAztGa9zIK3Y3LXuJes33Lz9x+YyTxgrkIdabu2vqcGOWwdfCpf1hWLRrd553wd4QCDf6BBO6FfdsRiQ==" + }, + "node_modules/xmlhttprequest-ssl": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.5.tgz", + "integrity": "sha1-wodrBhaKrcQOV9l+gRkayPQ5iz4=", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", + "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=" + }, + "node_modules/yargs": { + "version": "3.32.0", + "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz", + "integrity": "sha1-AwiOnr+edWtpdRYR0qXvWRSCyZU=", + "dependencies": { + "camelcase": "^2.0.1", + "cliui": "^3.0.3", + "decamelize": "^1.1.1", + "os-locale": "^1.4.0", + "string-width": "^1.0.1", + "window-size": "^0.1.4", + "y18n": "^3.2.0" + } + }, + "node_modules/yeast": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/yeast/-/yeast-0.1.2.tgz", + "integrity": "sha1-AI4G2AlDIMNy28L47XagymyKxBk=" + }, + "node_modules/z-schema": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/z-schema/-/z-schema-3.25.1.tgz", + "integrity": "sha512-7tDlwhrBG+oYFdXNOjILSurpfQyuVgkRe3hB2q8TEssamDHB7BbLWYkYO98nTn0FibfdFroFKDjndbgufAgS/Q==", + "dependencies": { + "core-js": "^2.5.7", + "lodash.get": "^4.0.0", + "lodash.isequal": "^4.0.0", + "validator": "^10.0.0" + }, + "bin": { + "z-schema": "bin/z-schema" + }, + "optionalDependencies": { + "commander": "^2.7.1" + } + }, + "node_modules/zeromq": { + "version": "6.0.0-beta.6", + "resolved": "https://registry.npmjs.org/zeromq/-/zeromq-6.0.0-beta.6.tgz", + "integrity": "sha512-wLf6M7pBHijl+BRltUL2VoDpgbQcOZetiX8UzycHL8CcYFxYnRrpoG5fi3UX3+Umavz1lk4/dGaQez8qiDgr/Q==", + "hasInstallScript": true, + "dependencies": { + "node-gyp-build": "^4.1.0" + }, + "engines": { + "node": ">= 10.2" + } + }, + "node_modules/zip-stream": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/zip-stream/-/zip-stream-1.2.0.tgz", + "integrity": "sha1-qLxF9MG0lpnGuQGYuqyqzbzUugQ=", + "dependencies": { + "archiver-utils": "^1.3.0", + "compress-commons": "^1.2.0", + "lodash": "^4.8.0", + "readable-stream": "^2.0.0" + }, + "engines": { + "node": ">= 0.10.0" + } + } + }, "dependencies": { "@babel/parser": { "version": "7.11.2", "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.11.2.tgz", - "integrity": "sha512-Vuj/+7vLo6l1Vi7uuO+1ngCDNeVmNbTngcJFKCR/oEtz8tKz0CJxZEGmPt9KcIloZhOZ3Zit6xbpXT2MDlS9Vw==", - "dev": true - }, - "JSONStream": { - "version": 
"1.3.5", - "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", - "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", - "dev": true, - "requires": { - "jsonparse": "^1.2.0", - "through": ">=2.2.7 <3" - } + "integrity": "sha512-Vuj/+7vLo6l1Vi7uuO+1ngCDNeVmNbTngcJFKCR/oEtz8tKz0CJxZEGmPt9KcIloZhOZ3Zit6xbpXT2MDlS9Vw==" }, "accepts": { "version": "1.3.7", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "dev": true, "requires": { "mime-types": "~2.1.24", "negotiator": "0.6.2" @@ -33,14 +6792,12 @@ "acorn": { "version": "7.4.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.0.tgz", - "integrity": "sha512-+G7P8jJmCHr+S+cLfQxygbWhXy+8YTVGzAkpEbcLo2mLoL7tij/VG41QSHACSf5QgYRhMZYHuNc6drJaO0Da+w==", - "dev": true + "integrity": "sha512-+G7P8jJmCHr+S+cLfQxygbWhXy+8YTVGzAkpEbcLo2mLoL7tij/VG41QSHACSf5QgYRhMZYHuNc6drJaO0Da+w==" }, "acorn-node": { "version": "1.8.2", "resolved": "https://registry.npmjs.org/acorn-node/-/acorn-node-1.8.2.tgz", "integrity": "sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A==", - "dev": true, "requires": { "acorn": "^7.0.0", "acorn-walk": "^7.0.0", @@ -50,32 +6807,27 @@ "acorn-walk": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz", - "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==", - "dev": true + "integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==" }, "address": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/address/-/address-1.1.2.tgz", - "integrity": "sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==", - "dev": true + "integrity": 
"sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==" }, "adm-zip": { "version": "0.4.11", "resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.4.11.tgz", - "integrity": "sha512-L8vcjDTCOIJk7wFvmlEUN7AsSb8T+2JrdP7KINBjzr24TJ5Mwj590sLu3BC7zNZowvJWa/JtPmD8eJCzdtDWjA==", - "dev": true + "integrity": "sha512-L8vcjDTCOIJk7wFvmlEUN7AsSb8T+2JrdP7KINBjzr24TJ5Mwj590sLu3BC7zNZowvJWa/JtPmD8eJCzdtDWjA==" }, "after": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz", - "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8=", - "dev": true + "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8=" }, "agentkeepalive": { "version": "3.4.1", "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-3.4.1.tgz", "integrity": "sha512-MPIwsZU9PP9kOrZpyu2042kYA8Fdt/AedQYkYXucHgF9QoD9dXVp0ypuGnHXSR0hTstBxdt85Xkh4JolYfK5wg==", - "dev": true, "requires": { "humanize-ms": "^1.2.1" } @@ -83,14 +6835,12 @@ "ansi-regex": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=" }, "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, "requires": { "color-convert": "^1.9.0" } @@ -98,14 +6848,12 @@ "any-promise": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=", - "dev": true + "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=" }, "anymatch": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", "integrity": "sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", - "dev": true, "requires": { 
"micromatch": "^2.1.5", "normalize-path": "^2.0.0" @@ -115,7 +6863,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/archiver/-/archiver-2.1.1.tgz", "integrity": "sha1-/2YrSnggFJSj7lRNOjP+dJZQnrw=", - "dev": true, "requires": { "archiver-utils": "^1.3.0", "async": "^2.0.0", @@ -131,7 +6878,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/archiver-utils/-/archiver-utils-1.3.0.tgz", "integrity": "sha1-5QtMCccL89aA4y/xt5lOn52JUXQ=", - "dev": true, "requires": { "glob": "^7.0.0", "graceful-fs": "^4.1.0", @@ -144,14 +6890,12 @@ "argh": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/argh/-/argh-0.1.4.tgz", - "integrity": "sha1-PrTWEpc/xrbcbvM49W91nyrFw6Y=", - "dev": true + "integrity": "sha1-PrTWEpc/xrbcbvM49W91nyrFw6Y=" }, "argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, "requires": { "sprintf-js": "~1.0.2" } @@ -160,7 +6904,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", - "dev": true, "requires": { "arr-flatten": "^1.0.1" } @@ -168,44 +6911,37 @@ "arr-flatten": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", - "dev": true + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==" }, "arr-union": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", - "dev": true + "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=" }, "array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - 
"integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=", - "dev": true + "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" }, "array-unique": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", - "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=", - "dev": true + "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=" }, "arraybuffer.slice": { "version": "0.0.7", "resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz", - "integrity": "sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog==", - "dev": true + "integrity": "sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog==" }, "asap": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=", - "dev": true + "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" }, "asn1.js": { "version": "4.10.1", "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", - "dev": true, "requires": { "bn.js": "^4.0.0", "inherits": "^2.0.1", @@ -215,8 +6951,7 @@ "bn.js": { "version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, @@ -224,7 +6959,6 @@ "version": "1.5.0", "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", - "dev": true, "requires": { "object-assign": "^4.1.1", "util": "0.10.3" @@ -233,14 +6967,12 @@ "inherits": { "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", - "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=", - "dev": true + "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=" }, "util": { "version": "0.10.3", "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", - "dev": true, "requires": { "inherits": "2.0.1" } @@ -256,14 +6988,12 @@ "assign-symbols": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", - "dev": true + "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=" }, "async": { "version": "2.6.3", "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "dev": true, "requires": { "lodash": "^4.17.14" } @@ -271,32 +7001,27 @@ "async-each": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", - "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==", - "dev": true + "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==" }, "async-limiter": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==", - "dev": true + "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" }, "asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", - "dev": true + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" }, "atob": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": 
"sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", - "dev": true + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==" }, "aws-sdk": { "version": "2.260.1", "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.260.1.tgz", "integrity": "sha512-NZ5nPImMQD4ULLPbbpBDt7d9aludsYBttOd4dtlxxy+IANrDn9meQn591xOULwaE9usrtbzWkJJFXop3wpznTQ==", - "dev": true, "requires": { "buffer": "4.9.1", "events": "1.1.1", @@ -313,7 +7038,6 @@ "version": "4.9.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz", "integrity": "sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=", - "dev": true, "requires": { "base64-js": "^1.0.2", "ieee754": "^1.1.4", @@ -323,16 +7047,14 @@ "ieee754": { "version": "1.1.8", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.8.tgz", - "integrity": "sha1-vjPUCsEO8ZJnAfbwii2G+/0a0+Q=", - "dev": true + "integrity": "sha1-vjPUCsEO8ZJnAfbwii2G+/0a0+Q=" } } }, "backo2": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/backo2/-/backo2-1.0.2.tgz", - "integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc=", - "dev": true + "integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc=" }, "balanced-match": { "version": "1.0.0", @@ -343,7 +7065,6 @@ "version": "0.11.2", "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", - "dev": true, "requires": { "cache-base": "^1.0.1", "class-utils": "^0.3.5", @@ -358,7 +7079,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, "requires": { "is-descriptor": "^1.0.0" } @@ -367,7 +7087,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", "integrity": 
"sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -376,7 +7095,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -385,7 +7103,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -395,46 +7112,39 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" }, "kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" } } }, "base64-arraybuffer": { "version": "0.1.5", "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz", - "integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg=", - "dev": true + "integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg=" }, "base64-js": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", - "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==", - "dev": true + "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" }, "base64id": 
{ "version": "1.0.0", "resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz", - "integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY=", - "dev": true + "integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY=" }, "bcryptjs": { "version": "2.4.3", "resolved": "https://registry.npmjs.org/bcryptjs/-/bcryptjs-2.4.3.tgz", - "integrity": "sha1-mrVie5PmBiH/fNrF2pczAn3x0Ms=", - "dev": true + "integrity": "sha1-mrVie5PmBiH/fNrF2pczAn3x0Ms=" }, "better-assert": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz", "integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=", - "dev": true, "requires": { "callsite": "1.0.0" } @@ -442,14 +7152,12 @@ "binary-extensions": { "version": "1.13.1", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", - "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", - "dev": true + "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==" }, "bindings": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "dev": true, "optional": true, "requires": { "file-uri-to-path": "1.0.0" @@ -459,7 +7167,6 @@ "version": "1.2.3", "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", - "dev": true, "requires": { "readable-stream": "^2.3.5", "safe-buffer": "^5.1.1" @@ -468,26 +7175,22 @@ "blob": { "version": "0.0.5", "resolved": "https://registry.npmjs.org/blob/-/blob-0.0.5.tgz", - "integrity": "sha512-gaqbzQPqOoamawKg0LGVd7SzLgXS+JH61oWprSLH+P+abTczqJbhTR8CmJ2u9/bUYNmHTGJx/UEmn6doAvvuig==", - "dev": true + "integrity": 
"sha512-gaqbzQPqOoamawKg0LGVd7SzLgXS+JH61oWprSLH+P+abTczqJbhTR8CmJ2u9/bUYNmHTGJx/UEmn6doAvvuig==" }, "bluebird": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" }, "bn.js": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.2.tgz", - "integrity": "sha512-40rZaf3bUNKTVYu9sIeeEGOg7g14Yvnj9kH7b50EiwX0Q7A6umbvfI5tvHaOERH0XigqKkfLkFQxzb4e6CIXnA==", - "dev": true + "integrity": "sha512-40rZaf3bUNKTVYu9sIeeEGOg7g14Yvnj9kH7b50EiwX0Q7A6umbvfI5tvHaOERH0XigqKkfLkFQxzb4e6CIXnA==" }, "body-parser": { "version": "1.18.3", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz", "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=", - "dev": true, "requires": { "bytes": "3.0.0", "content-type": "~1.0.4", @@ -505,7 +7208,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -513,8 +7215,7 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, @@ -522,7 +7223,6 @@ "version": "0.0.7", "resolved": "https://registry.npmjs.org/bops/-/bops-0.0.7.tgz", "integrity": "sha1-tKClqDmkBkVK8P4FqLkaenZqVOI=", - "dev": true, "requires": { "base64-js": "0.0.2", "to-utf8": "0.0.1" @@ -531,16 +7231,14 @@ "base64-js": { "version": "0.0.2", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-0.0.2.tgz", - "integrity": "sha1-Ak8Pcq+iW3X5wO5zzU9V7Bvtl4Q=", - "dev": true + "integrity": 
"sha1-Ak8Pcq+iW3X5wO5zzU9V7Bvtl4Q=" } } }, "bower": { "version": "1.8.8", "resolved": "https://registry.npmjs.org/bower/-/bower-1.8.8.tgz", - "integrity": "sha512-1SrJnXnkP9soITHptSO+ahx3QKp3cVzn8poI6ujqc5SeOkg5iqM1pK9H+DSc2OQ8SnO0jC/NG4Ur/UIwy7574A==", - "dev": true + "integrity": "sha512-1SrJnXnkP9soITHptSO+ahx3QKp3cVzn8poI6ujqc5SeOkg5iqM1pK9H+DSc2OQ8SnO0jC/NG4Ur/UIwy7574A==" }, "brace-expansion": { "version": "1.1.8", @@ -555,7 +7253,6 @@ "version": "1.8.5", "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", - "dev": true, "requires": { "expand-range": "^1.8.1", "preserve": "^0.2.0", @@ -565,18 +7262,16 @@ "brorand": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=", - "dev": true + "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" }, "browser-pack": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/browser-pack/-/browser-pack-6.1.0.tgz", "integrity": "sha512-erYug8XoqzU3IfcU8fUgyHqyOXqIE4tUTTQ+7mqUjQlvnXkOO6OlT9c/ZoJVHYoAaqGxr09CN53G7XIsO4KtWA==", - "dev": true, "requires": { - "JSONStream": "^1.0.3", "combine-source-map": "~0.8.0", "defined": "^1.0.0", + "JSONStream": "^1.0.3", "safe-buffer": "^5.1.1", "through2": "^2.0.0", "umd": "^3.0.0" @@ -586,7 +7281,6 @@ "version": "1.11.3", "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-1.11.3.tgz", "integrity": "sha512-exDi1BYWB/6raKHmDTCicQfTkqwN5fioMFV4j8BsfMU4R2DK/QfZfK7kOVkmWCNANf0snkBzqGqAJBao9gZMdQ==", - "dev": true, "requires": { "resolve": "1.1.7" }, @@ -594,8 +7288,7 @@ "resolve": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", - "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", - "dev": true + "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=" } } }, @@ -609,9 +7302,7 @@ "version": "16.2.3", "resolved": "https://registry.npmjs.org/browserify/-/browserify-16.2.3.tgz", 
"integrity": "sha512-zQt/Gd1+W+IY+h/xX2NYMW4orQWhqSwyV+xsblycTtpOuB27h1fZhhNQuipJ4t79ohw4P4mMem0jp/ZkISQtjQ==", - "dev": true, "requires": { - "JSONStream": "^1.0.3", "assert": "^1.4.0", "browser-pack": "^6.0.1", "browser-resolve": "^1.11.0", @@ -633,6 +7324,7 @@ "https-browserify": "^1.0.0", "inherits": "~2.0.1", "insert-module-globals": "^7.0.0", + "JSONStream": "^1.0.3", "labeled-stream-splicer": "^2.0.0", "mkdirp": "^0.5.0", "module-deps": "^6.0.0", @@ -664,14 +7356,12 @@ "events": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/events/-/events-2.1.0.tgz", - "integrity": "sha512-3Zmiobend8P9DjmKAty0Era4jV8oJ0yGYe2nJJAxgymF9+N8F2m0hhZiMoWtcfepExzNKZumFU3ksdQbInGWCg==", - "dev": true + "integrity": "sha512-3Zmiobend8P9DjmKAty0Era4jV8oJ0yGYe2nJJAxgymF9+N8F2m0hhZiMoWtcfepExzNKZumFU3ksdQbInGWCg==" }, "url": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", - "dev": true, "requires": { "punycode": "1.3.2", "querystring": "0.2.0" @@ -683,7 +7373,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "dev": true, "requires": { "buffer-xor": "^1.0.3", "cipher-base": "^1.0.0", @@ -697,7 +7386,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", - "dev": true, "requires": { "browserify-aes": "^1.0.4", "browserify-des": "^1.0.0", @@ -708,7 +7396,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", - "dev": true, "requires": { "cipher-base": "^1.0.1", "des.js": "^1.0.0", @@ 
-720,7 +7407,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz", "integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=", - "dev": true, "requires": { "bn.js": "^4.1.0", "randombytes": "^2.0.1" @@ -729,8 +7415,7 @@ "bn.js": { "version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, @@ -738,7 +7423,6 @@ "version": "4.2.1", "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", - "dev": true, "requires": { "bn.js": "^5.1.1", "browserify-rsa": "^4.0.1", @@ -754,14 +7438,12 @@ "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "readable-stream": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, "requires": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -771,8 +7453,7 @@ "safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" } } 
}, @@ -780,7 +7461,6 @@ "version": "0.2.0", "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "dev": true, "requires": { "pako": "~1.0.5" } @@ -788,14 +7468,12 @@ "bson": { "version": "1.0.9", "resolved": "https://registry.npmjs.org/bson/-/bson-1.0.9.tgz", - "integrity": "sha512-IQX9/h7WdMBIW/q/++tGd+emQr0XMdeZ6icnT/74Xk9fnabWn+gZgpE+9V+gujL3hhJOoNrnDVY7tWdzc7NUTg==", - "dev": true + "integrity": "sha512-IQX9/h7WdMBIW/q/++tGd+emQr0XMdeZ6icnT/74Xk9fnabWn+gZgpE+9V+gujL3hhJOoNrnDVY7tWdzc7NUTg==" }, "buffer": { "version": "5.6.0", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.6.0.tgz", "integrity": "sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==", - "dev": true, "requires": { "base64-js": "^1.0.2", "ieee754": "^1.1.4" @@ -805,7 +7483,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", - "dev": true, "requires": { "buffer-alloc-unsafe": "^1.1.0", "buffer-fill": "^1.0.0" @@ -814,62 +7491,52 @@ "buffer-alloc-unsafe": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==", - "dev": true + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" }, "buffer-crc32": { "version": "0.2.13", "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", - "dev": true + "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=" }, "buffer-equal-constant-time": { "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=", - "dev": true + "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" }, "buffer-fill": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", - "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=", - "dev": true + "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=" }, "buffer-from": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", - "dev": true + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" }, "buffer-shims": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-shims/-/buffer-shims-1.0.0.tgz", - "integrity": "sha1-mXjOMXOIxkmth5MCjDR37wRKi1E=", - "dev": true + "integrity": "sha1-mXjOMXOIxkmth5MCjDR37wRKi1E=" }, "buffer-xor": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=", - "dev": true + "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" }, "builtin-status-codes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", - "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=", - "dev": true + "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=" }, "bytes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", - "dev": true + "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "cache-base": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", - "dev": true, 
"requires": { "collection-visit": "^1.0.0", "component-emitter": "^1.2.1", @@ -885,28 +7552,24 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, "cached-path-relative": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/cached-path-relative/-/cached-path-relative-1.0.2.tgz", - "integrity": "sha512-5r2GqsoEb4qMTTN9J+WzXfjov+hjxT+j3u5K+kIVNIwAd99DLCJE9pBIMP1qVeybV6JiijL385Oz0DcYxfbOIg==", - "dev": true + "integrity": "sha512-5r2GqsoEb4qMTTN9J+WzXfjov+hjxT+j3u5K+kIVNIwAd99DLCJE9pBIMP1qVeybV6JiijL385Oz0DcYxfbOIg==" }, "call-me-maybe": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", - "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=", - "dev": true + "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=" }, "caller-path": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-0.1.0.tgz", "integrity": "sha1-lAhe9jWB7NPaqSREqP6U6CV3dR8=", - "dev": true, "requires": { "callsites": "^0.2.0" } @@ -914,26 +7577,22 @@ "callsite": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz", - "integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA=", - "dev": true + "integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA=" }, "callsites": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-0.2.0.tgz", - "integrity": "sha1-r6uWJikQp/M8GaV3WCXGnzTjUMo=", - "dev": true + "integrity": "sha1-r6uWJikQp/M8GaV3WCXGnzTjUMo=" }, "camelcase": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", - "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", - "dev": true + "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=" }, "catharsis": { "version": "0.8.11", "resolved": "https://registry.npmjs.org/catharsis/-/catharsis-0.8.11.tgz", 
"integrity": "sha512-a+xUyMV7hD1BrDQA/3iPV7oc+6W26BgVJO05PGEoatMyIuPScQKsde6i3YorWX1qs+AZjnJ18NqdKoCtKiNh1g==", - "dev": true, "requires": { "lodash": "^4.17.14" } @@ -953,7 +7612,6 @@ "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -963,14 +7621,12 @@ "chance": { "version": "1.0.16", "resolved": "https://registry.npmjs.org/chance/-/chance-1.0.16.tgz", - "integrity": "sha512-2bgDHH5bVfAXH05SPtjqrsASzZ7h90yCuYT2z4mkYpxxYvJXiIydBFzVieVHZx7wLH1Ag2Azaaej2/zA1XUrNQ==", - "dev": true + "integrity": "sha512-2bgDHH5bVfAXH05SPtjqrsASzZ7h90yCuYT2z4mkYpxxYvJXiIydBFzVieVHZx7wLH1Ag2Azaaej2/zA1XUrNQ==" }, "chokidar": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz", "integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=", - "dev": true, "requires": { "anymatch": "^1.3.0", "async-each": "^1.0.0", @@ -987,7 +7643,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "dev": true, "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -997,7 +7652,6 @@ "version": "0.3.6", "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", - "dev": true, "requires": { "arr-union": "^3.1.0", "define-property": "^0.2.5", @@ -1009,7 +7663,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -1017,8 +7670,7 @@ "isobject": { "version": "3.0.1", "resolved": 
"https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, @@ -1026,7 +7678,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/cli-color/-/cli-color-1.1.0.tgz", "integrity": "sha1-3hiM3Ekp2DtnrqBBEPvtQP2/Z3U=", - "dev": true, "requires": { "ansi-regex": "2", "d": "^0.1.1", @@ -1040,7 +7691,6 @@ "version": "3.2.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", - "dev": true, "requires": { "string-width": "^1.0.1", "strip-ansi": "^3.0.1", @@ -1050,14 +7700,12 @@ "code-point-at": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" }, "collection-visit": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", - "dev": true, "requires": { "map-visit": "^1.0.0", "object-visit": "^1.0.0" @@ -1067,7 +7715,6 @@ "version": "0.8.0", "resolved": "https://registry.npmjs.org/color/-/color-0.8.0.tgz", "integrity": "sha1-iQwHw/1OZJU3Y4kRz2keVFi2/KU=", - "dev": true, "requires": { "color-convert": "^0.5.0", "color-string": "^0.3.0" @@ -1076,8 +7723,7 @@ "color-convert": { "version": "0.5.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-0.5.3.tgz", - "integrity": "sha1-vbbGnOZg+t/+CwAHzER+G59ygr0=", - "dev": true + "integrity": "sha1-vbbGnOZg+t/+CwAHzER+G59ygr0=" } } }, @@ -1085,7 +7731,6 @@ "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, "requires": { "color-name": "1.1.3" } @@ -1093,14 +7738,12 @@ 
"color-name": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "color-string": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/color-string/-/color-string-0.3.0.tgz", "integrity": "sha1-J9RvtnAlxcL6JZk7+/V55HhBuZE=", - "dev": true, "requires": { "color-name": "^1.0.0" } @@ -1108,20 +7751,17 @@ "colornames": { "version": "0.0.2", "resolved": "https://registry.npmjs.org/colornames/-/colornames-0.0.2.tgz", - "integrity": "sha1-2BH9bIT1kClJmorEQ2ICk1uSvjE=", - "dev": true + "integrity": "sha1-2BH9bIT1kClJmorEQ2ICk1uSvjE=" }, "colors": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", - "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=", - "dev": true + "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=" }, "colorspace": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.0.1.tgz", "integrity": "sha1-yZx5btMRKLmHalLh7l7gOkpxl0k=", - "dev": true, "requires": { "color": "0.8.x", "text-hex": "0.0.x" @@ -1131,7 +7771,6 @@ "version": "0.8.0", "resolved": "https://registry.npmjs.org/combine-source-map/-/combine-source-map-0.8.0.tgz", "integrity": "sha1-pY0N8ELBhvz4IqjoAV9UUNLXmos=", - "dev": true, "requires": { "convert-source-map": "~1.1.0", "inline-source-map": "~0.6.0", @@ -1143,7 +7782,6 @@ "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dev": true, "requires": { "delayed-stream": "~1.0.0" } @@ -1151,32 +7789,27 @@ "commander": { "version": "2.15.1", "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", - "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==", - "dev": true + 
"integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==" }, "component-bind": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz", - "integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E=", - "dev": true + "integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E=" }, "component-emitter": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==", - "dev": true + "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" }, "component-inherit": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/component-inherit/-/component-inherit-0.0.3.tgz", - "integrity": "sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM=", - "dev": true + "integrity": "sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM=" }, "compress-commons": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/compress-commons/-/compress-commons-1.2.2.tgz", "integrity": "sha1-UkqfEJA/OoEzibAiXSfEi7dRiQ8=", - "dev": true, "requires": { "buffer-crc32": "^0.2.1", "crc32-stream": "^2.0.0", @@ -1188,7 +7821,6 @@ "version": "2.0.18", "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dev": true, "requires": { "mime-db": ">= 1.43.0 < 2" } @@ -1197,7 +7829,6 @@ "version": "1.7.2", "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.2.tgz", "integrity": "sha1-qv+81qr4VLROuygDU9WtFlH1mmk=", - "dev": true, "requires": { "accepts": "~1.3.4", "bytes": "3.0.0", @@ -1212,7 +7843,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -1220,14 +7850,12 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "safe-buffer": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", - "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==", - "dev": true + "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" } } }, @@ -1240,7 +7868,6 @@ "version": "1.6.2", "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "dev": true, "requires": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", @@ -1252,7 +7879,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/connect-multiparty/-/connect-multiparty-2.1.0.tgz", "integrity": "sha512-DLzhq7mcQKKk/Y83NLY5dp0kxO0xTxA5yu3oMgFBfpWLQR1NArrXMBcEXignTcNFVaXrjkgfepNG3nkfEy9Sow==", - "dev": true, "requires": { "multiparty": "~4.1.3", "on-finished": "~2.3.0", @@ -1263,44 +7889,37 @@ "console-browserify": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", - "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", - "dev": true + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" }, "constants-browserify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", - "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=", - "dev": true + 
"integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=" }, "content-disposition": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=", - "dev": true + "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=" }, "content-type": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "dev": true + "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" }, "convert-source-map": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.1.3.tgz", - "integrity": "sha1-SCnId+n+SbMWHzvzZziI4gRpmGA=", - "dev": true + "integrity": "sha1-SCnId+n+SbMWHzvzZziI4gRpmGA=" }, "cookie": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", - "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=", - "dev": true + "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=" }, "cookie-parser": { "version": "1.4.3", "resolved": "https://registry.npmjs.org/cookie-parser/-/cookie-parser-1.4.3.tgz", "integrity": "sha1-D+MfoZ0AC5X0qt8fU/3CuKIDuqU=", - "dev": true, "requires": { "cookie": "0.3.1", "cookie-signature": "1.0.6" @@ -1309,44 +7928,37 @@ "cookie-signature": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=", - "dev": true + "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" }, "cookiejar": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.2.tgz", - "integrity": "sha512-Mw+adcfzPxcPeI+0WlvRrr/3lGVO0bD75SxX6811cxSh1Wbxx7xZBGK1eVtDf6si8rg2lhnUjsVLMFMfbRIuwA==", - "dev": true + "integrity": 
"sha512-Mw+adcfzPxcPeI+0WlvRrr/3lGVO0bD75SxX6811cxSh1Wbxx7xZBGK1eVtDf6si8rg2lhnUjsVLMFMfbRIuwA==" }, "copy-descriptor": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", - "dev": true + "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=" }, "copy-to": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz", - "integrity": "sha1-JoD7uAaKSNCGVrYJgJK9r8kG9KU=", - "dev": true + "integrity": "sha1-JoD7uAaKSNCGVrYJgJK9r8kG9KU=" }, "core-js": { "version": "2.6.11", "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", - "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==", - "dev": true + "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==" }, "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", - "dev": true + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" }, "crc": { "version": "3.8.0", "resolved": "https://registry.npmjs.org/crc/-/crc-3.8.0.tgz", "integrity": "sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==", - "dev": true, "requires": { "buffer": "^5.1.0" } @@ -1355,7 +7967,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/crc32-stream/-/crc32-stream-2.0.0.tgz", "integrity": "sha1-483TtN8xaN10494/u8t7KX/pCPQ=", - "dev": true, "requires": { "crc": "^3.4.4", "readable-stream": "^2.0.0" @@ -1365,7 +7976,6 @@ "version": "4.0.4", "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", - "dev": true, "requires": { "bn.js": "^4.1.0", "elliptic": "^6.5.3" @@ -1374,8 +7984,7 @@ "bn.js": { 
"version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, @@ -1383,7 +7992,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "dev": true, "requires": { "cipher-base": "^1.0.1", "inherits": "^2.0.1", @@ -1396,7 +8004,6 @@ "version": "1.1.7", "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "dev": true, "requires": { "cipher-base": "^1.0.3", "create-hash": "^1.1.0", @@ -1410,7 +8017,6 @@ "version": "3.12.0", "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", - "dev": true, "requires": { "browserify-cipher": "^1.0.0", "browserify-sign": "^4.0.0", @@ -1428,14 +8034,12 @@ "cycle": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", - "integrity": "sha1-IegLK+hYD5i0aPN5QwZisEbDStI=", - "dev": true + "integrity": "sha1-IegLK+hYD5i0aPN5QwZisEbDStI=" }, "d": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/d/-/d-0.1.1.tgz", "integrity": "sha1-2hhMU10Y2O57oqoim5FACfrhEwk=", - "dev": true, "requires": { "es5-ext": "~0.10.2" } @@ -1443,14 +8047,12 @@ "dash-ast": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/dash-ast/-/dash-ast-1.0.0.tgz", - "integrity": "sha512-Vy4dx7gquTeMcQR/hDkYLGUnwVil6vk4FOOct+djUnHOUWt+zJPJAaRIXaAFkPXtJjvlY7o3rfRu0/3hpnwoUA==", - "dev": true + "integrity": 
"sha512-Vy4dx7gquTeMcQR/hDkYLGUnwVil6vk4FOOct+djUnHOUWt+zJPJAaRIXaAFkPXtJjvlY7o3rfRu0/3hpnwoUA==" }, "debug": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", - "dev": true, "requires": { "ms": "2.0.0" }, @@ -1458,22 +8060,19 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, "decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=" }, "decode-uri-component": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", - "dev": true + "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=" }, "deep-eql": { "version": "0.1.3", @@ -1496,7 +8095,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/default-user-agent/-/default-user-agent-1.0.0.tgz", "integrity": "sha1-FsRu/cq6PtxF8k8r1IaLAbfCrcY=", - "dev": true, "requires": { "os-name": "~1.0.3" } @@ -1505,7 +8103,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", - "dev": true, "requires": { "is-descriptor": "^1.0.2", "isobject": "^3.0.1" @@ -1515,7 +8112,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -1524,7 +8120,6 @@ 
"version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -1533,7 +8128,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -1543,40 +8137,34 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" }, "kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" } } }, "defined": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/defined/-/defined-1.0.0.tgz", - "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM=", - "dev": true + "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM=" }, "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "dev": true + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" }, "depd": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", - "dev": true + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=" }, "deps-sort": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/deps-sort/-/deps-sort-2.0.1.tgz", 
"integrity": "sha512-1orqXQr5po+3KI6kQb9A4jnXT1PBwggGl2d7Sq2xsnOeI9GPcE/tGcF9UiSZtZBM7MukY4cAh7MemS6tZYipfw==", - "dev": true, "requires": { "JSONStream": "^1.0.3", "shasum-object": "^1.0.0", @@ -1588,7 +8176,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", - "dev": true, "requires": { "inherits": "^2.0.1", "minimalistic-assert": "^1.0.0" @@ -1597,14 +8184,12 @@ "destroy": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=", - "dev": true + "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" }, "detective": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/detective/-/detective-5.2.0.tgz", "integrity": "sha512-6SsIx+nUUbuK0EthKjv0zrdnajCCXVYGmbYYiYjFVpzcjwEs/JMDZ8tPRG29J/HhN56t3GJp2cGSWDRjjot8Pg==", - "dev": true, "requires": { "acorn-node": "^1.6.1", "defined": "^1.0.0", @@ -1614,8 +8199,7 @@ "minimist": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" } } }, @@ -1623,7 +8207,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/diagnostics/-/diagnostics-1.0.1.tgz", "integrity": "sha1-rM2wgMgrsl0N1zQwqeaof7tDFUE=", - "dev": true, "requires": { "colorspace": "1.0.x", "enabled": "1.0.x", @@ -1640,7 +8223,6 @@ "version": "5.0.3", "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", - "dev": true, "requires": { "bn.js": "^4.1.0", "miller-rabin": "^4.0.0", @@ -1650,8 +8232,7 @@ "bn.js": 
{ "version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, @@ -1659,7 +8240,6 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/digest-header/-/digest-header-0.0.1.tgz", "integrity": "sha1-Ecz23uxXZqw3l0TZAcEsuklRS+Y=", - "dev": true, "requires": { "utility": "0.1.11" }, @@ -1668,7 +8248,6 @@ "version": "0.1.11", "resolved": "https://registry.npmjs.org/utility/-/utility-0.1.11.tgz", "integrity": "sha1-/eYM+bTkdRlHoM9dEEzik2ciZxU=", - "dev": true, "requires": { "address": ">=0.0.1" } @@ -1679,7 +8258,6 @@ "version": "0.2.2", "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dev": true, "requires": { "domelementtype": "^2.0.1", "entities": "^2.0.0" @@ -1688,20 +8266,17 @@ "domain-browser": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", - "dev": true + "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==" }, "domelementtype": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.0.1.tgz", - "integrity": "sha512-5HOHUDsYZWV8FGWN0Njbr/Rn7f/eWSQi1v7+HsUVwXgn8nWWlL64zKDkS0n8ZmQ3mlWOMuXOnR+7Nx/5tMO5AQ==", - "dev": true + "integrity": "sha512-5HOHUDsYZWV8FGWN0Njbr/Rn7f/eWSQi1v7+HsUVwXgn8nWWlL64zKDkS0n8ZmQ3mlWOMuXOnR+7Nx/5tMO5AQ==" }, "domhandler": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-3.0.0.tgz", "integrity": 
"sha512-eKLdI5v9m67kbXQbJSNn1zjh0SDzvzWVWtX+qEI3eMjZw8daH9k8rlj1FZY9memPwjiskQFbe7vHVVJIAqoEhw==", - "dev": true, "requires": { "domelementtype": "^2.0.1" } @@ -1710,7 +8285,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.1.0.tgz", "integrity": "sha512-CD9M0Dm1iaHfQ1R/TI+z3/JWp/pgub0j4jIQKH89ARR4ATAV2nbaOQS5XxU9maJP5jHaPdDDQSEHuE2UmpUTKg==", - "dev": true, "requires": { "dom-serializer": "^0.2.1", "domelementtype": "^2.0.1", @@ -1720,14 +8294,12 @@ "double-ended-queue": { "version": "2.1.0-0", "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz", - "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=", - "dev": true + "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=" }, "duplexer2": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", - "dev": true, "requires": { "readable-stream": "^2.0.2" } @@ -1736,7 +8308,6 @@ "version": "3.7.1", "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", - "dev": true, "requires": { "end-of-stream": "^1.0.0", "inherits": "^2.0.1", @@ -1748,7 +8319,6 @@ "version": "1.0.11", "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "dev": true, "requires": { "safe-buffer": "^5.0.1" } @@ -1756,20 +8326,17 @@ "ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=", - "dev": true + "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, "ejs": { "version": "2.6.1", "resolved": "https://registry.npmjs.org/ejs/-/ejs-2.6.1.tgz", - "integrity": 
"sha512-0xy4A/twfrRCnkhfk8ErDi5DqdAsAqeGxht4xkCUrsvhhbQNs7E+4jV0CN7+NKIY0aHE72+XvqtBIXzD31ZbXQ==", - "dev": true + "integrity": "sha512-0xy4A/twfrRCnkhfk8ErDi5DqdAsAqeGxht4xkCUrsvhhbQNs7E+4jV0CN7+NKIY0aHE72+XvqtBIXzD31ZbXQ==" }, "elliptic": { "version": "6.5.3", "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", - "dev": true, "requires": { "bn.js": "^4.4.0", "brorand": "^1.0.1", @@ -1783,22 +8350,19 @@ "bn.js": { "version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, "emits": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/emits/-/emits-3.0.0.tgz", - "integrity": "sha1-MnUrupXhcHshlWI4Srm7ix/WL3A=", - "dev": true + "integrity": "sha1-MnUrupXhcHshlWI4Srm7ix/WL3A=" }, "enabled": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/enabled/-/enabled-1.0.2.tgz", "integrity": "sha1-ll9lE9LC0cX0ZStkouM5ZGf8L5M=", - "dev": true, "requires": { "env-variable": "0.0.x" } @@ -1806,14 +8370,12 @@ "encodeurl": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", - "dev": true + "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" }, "end-of-stream": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, "requires": { "once": "^1.4.0" } @@ -1822,7 +8384,6 @@ "version": "3.2.1", "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.2.1.tgz", "integrity": 
"sha512-+VlKzHzMhaU+GsCIg4AoXF1UdDFjHHwMmMKqMJNDNLlUlejz58FCy4LBqB2YVJskHGYl06BatYWKP2TVdVXE5w==", - "dev": true, "requires": { "accepts": "~1.3.4", "base64id": "1.0.0", @@ -1836,7 +8397,6 @@ "version": "3.2.1", "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.2.1.tgz", "integrity": "sha512-y5AbkytWeM4jQr7m/koQLc5AxpRKC1hEVUb/s1FUAWEJq5AzJJ4NLvzuKPuxtDi5Mq755WuDvZ6Iv2rXj4PTzw==", - "dev": true, "requires": { "component-emitter": "1.2.1", "component-inherit": "0.0.3", @@ -1854,8 +8414,7 @@ "component-emitter": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", - "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=", - "dev": true + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" } } }, @@ -1863,7 +8422,6 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.3.tgz", "integrity": "sha512-6HXPre2O4Houl7c4g7Ic/XzPnHBvaEmN90vtRO9uLmwtRqQmTOw0QMevL1TOfL2Cpu1VzsaTmMotQgMdkzGkVA==", - "dev": true, "requires": { "after": "0.8.2", "arraybuffer.slice": "~0.0.7", @@ -1875,20 +8433,17 @@ "entities": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", - "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==", - "dev": true + "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" }, "env-variable": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/env-variable/-/env-variable-0.0.6.tgz", - "integrity": "sha512-bHz59NlBbtS0NhftmR8+ExBEekE7br0e01jw+kk0NDro7TtZzBYZ5ScGPs3OmwnpyfHTHOtr1Y6uedCdrIldtg==", - "dev": true + "integrity": "sha512-bHz59NlBbtS0NhftmR8+ExBEekE7br0e01jw+kk0NDro7TtZzBYZ5ScGPs3OmwnpyfHTHOtr1Y6uedCdrIldtg==" }, "es5-ext": { "version": "0.10.53", "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", "integrity": 
"sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", - "dev": true, "requires": { "es6-iterator": "~2.0.3", "es6-symbol": "~3.1.3", @@ -1899,7 +8454,6 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", - "dev": true, "requires": { "d": "1", "es5-ext": "^0.10.35", @@ -1910,7 +8464,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", - "dev": true, "requires": { "es5-ext": "^0.10.50", "type": "^1.0.1" @@ -1921,14 +8474,12 @@ "es6-promise": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.2.1.tgz", - "integrity": "sha1-7FYjOGgDKQkgcXDDlEjiREndH8Q=", - "dev": true + "integrity": "sha1-7FYjOGgDKQkgcXDDlEjiREndH8Q=" }, "es6-symbol": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", - "dev": true, "requires": { "d": "^1.0.1", "ext": "^1.1.2" @@ -1938,7 +8489,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", - "dev": true, "requires": { "es5-ext": "^0.10.50", "type": "^1.0.1" @@ -1950,7 +8500,6 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-0.1.4.tgz", "integrity": "sha1-cGzvnpmqI2undmwjnIueKG6n0ig=", - "dev": true, "requires": { "d": "~0.1.1", "es5-ext": "~0.10.6", @@ -1962,7 +8511,6 @@ "version": "0.1.3", "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-0.1.3.tgz", "integrity": "sha1-1vWLjE/EE8JJtLqhl2j45NfIlE4=", - "dev": true, "requires": { "d": "~0.1.1", "es5-ext": "~0.10.5", @@ 
-1973,7 +8521,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-2.0.1.tgz", "integrity": "sha1-dhtcZ8/U8dGK+yNPaR1nhoLLO/M=", - "dev": true, "requires": { "d": "~0.1.1", "es5-ext": "~0.10.5" @@ -1984,32 +8531,27 @@ "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=", - "dev": true + "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" }, "escape-string-regexp": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" }, "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" }, "etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", - "dev": true + "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" }, "event-emitter": { "version": "0.3.5", "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", - "dev": true, "requires": { "d": "1", "es5-ext": "~0.10.14" @@ -2019,7 +8561,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", - "dev": true, "requires": { "es5-ext": "^0.10.50", "type": "^1.0.1" @@ -2030,14 +8571,12 @@ "events": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": 
"sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", - "dev": true + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" }, "evp_bytestokey": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "dev": true, "requires": { "md5.js": "^1.3.4", "safe-buffer": "^5.1.1" @@ -2047,7 +8586,6 @@ "version": "0.1.5", "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", - "dev": true, "requires": { "is-posix-bracket": "^0.1.0" } @@ -2056,7 +8594,6 @@ "version": "1.8.2", "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", - "dev": true, "requires": { "fill-range": "^2.1.0" } @@ -2065,7 +8602,6 @@ "version": "4.16.3", "resolved": "https://registry.npmjs.org/express/-/express-4.16.3.tgz", "integrity": "sha1-avilAjUNsyRuzEvs9rWjTSL37VM=", - "dev": true, "requires": { "accepts": "~1.3.5", "array-flatten": "1.1.1", @@ -2103,7 +8639,6 @@ "version": "1.18.2", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.2.tgz", "integrity": "sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ=", - "dev": true, "requires": { "bytes": "3.0.0", "content-type": "~1.0.4", @@ -2121,7 +8656,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -2129,26 +8663,22 @@ "iconv-lite": { "version": "0.4.19", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz", - "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==", - "dev": true + "integrity": 
"sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==" }, "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "qs": { "version": "6.5.1", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", - "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==", - "dev": true + "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==" }, "raw-body": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz", "integrity": "sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k=", - "dev": true, "requires": { "bytes": "3.0.0", "http-errors": "1.6.2", @@ -2159,14 +8689,12 @@ "depd": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.1.tgz", - "integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k=", - "dev": true + "integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k=" }, "http-errors": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.2.tgz", "integrity": "sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY=", - "dev": true, "requires": { "depd": "1.1.1", "inherits": "2.0.3", @@ -2177,22 +8705,19 @@ "setprototypeof": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.0.3.tgz", - "integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ=", - "dev": true + "integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ=" } } }, "safe-buffer": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", - "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==", - "dev": true + "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" }, "statuses": { "version": 
"1.4.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", - "dev": true + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" } } }, @@ -2200,7 +8725,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", - "dev": true, "requires": { "type": "^2.0.0" }, @@ -2208,22 +8732,19 @@ "type": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz", - "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==", - "dev": true + "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==" } } }, "extend": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" }, "extend-shallow": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dev": true, "requires": { "assign-symbols": "^1.0.0", "is-extendable": "^1.0.1" @@ -2233,7 +8754,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dev": true, "requires": { "is-plain-object": "^2.0.4" } @@ -2244,7 +8764,6 @@ "version": "0.3.2", "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", "integrity": 
"sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", - "dev": true, "requires": { "is-extglob": "^1.0.0" } @@ -2252,20 +8771,17 @@ "eyes": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "integrity": "sha1-Ys8SAjTGg3hdkCNIqADvPgzCC8A=", - "dev": true + "integrity": "sha1-Ys8SAjTGg3hdkCNIqADvPgzCC8A=" }, "fast-safe-stringify": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==", - "dev": true + "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" }, "fd-slicer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.0.1.tgz", "integrity": "sha1-i1vL2ewyfFBBv5qwI/1nUPEXfmU=", - "dev": true, "requires": { "pend": "~1.2.0" } @@ -2274,20 +8790,17 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "dev": true, "optional": true }, "filename-regex": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", - "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=", - "dev": true + "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=" }, "fill-range": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", - "dev": true, "requires": { "is-number": "^2.1.0", "isobject": "^2.0.0", @@ -2300,7 +8813,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==", - "dev": true, 
"requires": { "debug": "2.6.9", "encodeurl": "~1.0.2", @@ -2315,7 +8827,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -2323,28 +8834,24 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "statuses": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", - "dev": true + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" } } }, "for-in": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", - "dev": true + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=" }, "for-own": { "version": "0.1.5", "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", - "dev": true, "requires": { "for-in": "^1.0.1" } @@ -2353,7 +8860,6 @@ "version": "2.5.1", "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", - "dev": true, "requires": { "asynckit": "^0.4.0", "combined-stream": "^1.0.6", @@ -2363,26 +8869,22 @@ "format-util": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/format-util/-/format-util-1.0.5.tgz", - "integrity": "sha512-varLbTj0e0yVyRpqQhuWV+8hlePAgaoFRhNFj50BNjEIrw1/DphHSObtqwskVCPWNgzwPoQrZAbfa/SBiicNeg==", - "dev": true + "integrity": 
"sha512-varLbTj0e0yVyRpqQhuWV+8hlePAgaoFRhNFj50BNjEIrw1/DphHSObtqwskVCPWNgzwPoQrZAbfa/SBiicNeg==" }, "formidable": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/formidable/-/formidable-1.2.2.tgz", - "integrity": "sha512-V8gLm+41I/8kguQ4/o1D3RIHRmhYFG4pnNyonvua+40rqcEmT4+V71yaZ3B457xbbgCsCfjSPi65u/W6vK1U5Q==", - "dev": true + "integrity": "sha512-V8gLm+41I/8kguQ4/o1D3RIHRmhYFG4pnNyonvua+40rqcEmT4+V71yaZ3B457xbbgCsCfjSPi65u/W6vK1U5Q==" }, "forwarded": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=", - "dev": true + "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" }, "fragment-cache": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", - "dev": true, "requires": { "map-cache": "^0.2.2" } @@ -2390,14 +8892,12 @@ "fresh": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", - "dev": true + "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" }, "fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "dev": true + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, "fs.realpath": { "version": "1.0.0", @@ -2408,7 +8908,6 @@ "version": "1.2.13", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", - "dev": true, "optional": true, "requires": { "bindings": "^1.5.0", @@ -2418,26 +8917,22 @@ "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - 
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, "get-assigned-identifiers": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/get-assigned-identifiers/-/get-assigned-identifiers-1.2.0.tgz", - "integrity": "sha512-mBBwmeGTrxEMO4pMaaf/uUEFHnYtwr8FTe8Y/mer4rcV/bye0qGm6pw1bGZFGStxC5O76c5ZAVBGnqHmOaJpdQ==", - "dev": true + "integrity": "sha512-mBBwmeGTrxEMO4pMaaf/uUEFHnYtwr8FTe8Y/mer4rcV/bye0qGm6pw1bGZFGStxC5O76c5ZAVBGnqHmOaJpdQ==" }, "get-value": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", - "dev": true + "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=" }, "glob": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", - "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -2451,7 +8946,6 @@ "version": "0.3.0", "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", - "dev": true, "requires": { "glob-parent": "^2.0.0", "is-glob": "^2.0.0" @@ -2461,7 +8955,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", - "dev": true, "requires": { "is-glob": "^2.0.0" } @@ -2470,7 +8963,6 @@ "version": "2.4.0", "resolved": "https://registry.npmjs.org/got/-/got-2.4.0.tgz", "integrity": "sha1-5Ah6LNWbXSDy0WnchdIWntnon1Y=", - "dev": true, "requires": { "duplexify": "^3.2.0", "infinity-agent": "^1.0.0", @@ -2486,22 +8978,19 @@ "object-assign": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-2.1.1.tgz", - 
"integrity": "sha1-Q8NuXVaf+OSBbE76i+AtJpZ8GKo=", - "dev": true + "integrity": "sha1-Q8NuXVaf+OSBbE76i+AtJpZ8GKo=" } } }, "graceful-fs": { "version": "4.2.4", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", - "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==", - "dev": true + "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" }, "graceful-readlink": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/graceful-readlink/-/graceful-readlink-1.0.1.tgz", - "integrity": "sha1-TK+tdrxi8C+gObL5Tpo906ORpyU=", - "dev": true + "integrity": "sha1-TK+tdrxi8C+gObL5Tpo906ORpyU=" }, "growl": { "version": "1.10.5", @@ -2513,7 +9002,6 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, "requires": { "function-bind": "^1.1.1" } @@ -2522,7 +9010,6 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.3.tgz", "integrity": "sha512-G1LWKhDSvhGeAQ8mPVQlqNcOB2sJdwATtZKl2pDKKHfpf/rYj24lkinxf69blJbnsvtqqNU+L3SL50vzZhXOnw==", - "dev": true, "requires": { "isarray": "2.0.1" }, @@ -2530,28 +9017,24 @@ "isarray": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz", - "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=", - "dev": true + "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=" } } }, "has-cors": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-cors/-/has-cors-1.1.0.tgz", - "integrity": "sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk=", - "dev": true + "integrity": "sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk=" }, "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true + "integrity": 
"sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" }, "has-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", - "dev": true, "requires": { "get-value": "^2.0.6", "has-values": "^1.0.0", @@ -2561,8 +9044,7 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, @@ -2570,7 +9052,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", - "dev": true, "requires": { "is-number": "^3.0.0", "kind-of": "^4.0.0" @@ -2580,7 +9061,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -2589,7 +9069,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -2600,7 +9079,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -2611,7 +9089,6 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "dev": true, "requires": { "inherits": "^2.0.4", "readable-stream": "^3.6.0", @@ -2621,14 +9098,12 @@ "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "readable-stream": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, "requires": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -2638,8 +9113,7 @@ "safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" } } }, @@ -2647,7 +9121,6 @@ "version": "1.1.7", "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "dev": true, "requires": { "inherits": "^2.0.3", "minimalistic-assert": "^1.0.1" @@ -2663,7 +9136,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "dev": true, "requires": { "hash.js": "^1.0.3", "minimalistic-assert": "^1.0.0", @@ -2673,14 +9145,12 @@ "htmlescape": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/htmlescape/-/htmlescape-1.1.1.tgz", - "integrity": "sha1-OgPtwiFLyjtmQko+eVk0lQnLA1E=", - "dev": true + "integrity": "sha1-OgPtwiFLyjtmQko+eVk0lQnLA1E=" }, "htmlparser2": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-4.1.0.tgz", "integrity": "sha512-4zDq1a1zhE4gQso/c5LP1OtrhYTncXNSpvJYtWJBtXAETPlMfi3IFNjGuQbYLuVY4ZR0QMqRVvo4Pdy9KLyP8Q==", - "dev": true, "requires": { "domelementtype": "^2.0.1", "domhandler": "^3.0.0", @@ -2692,7 +9162,6 @@ "version": "1.6.3", "resolved": 
"https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", - "dev": true, "requires": { "depd": "~1.1.2", "inherits": "2.0.3", @@ -2703,14 +9172,12 @@ "https-browserify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", - "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=", - "dev": true + "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=" }, "humanize-ms": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", "integrity": "sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=", - "dev": true, "requires": { "ms": "^2.0.0" } @@ -2719,7 +9186,6 @@ "version": "0.4.23", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz", "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==", - "dev": true, "requires": { "safer-buffer": ">= 2.1.2 < 3" } @@ -2727,20 +9193,17 @@ "ieee754": { "version": "1.1.13", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==", - "dev": true + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" }, "indexof": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz", - "integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10=", - "dev": true + "integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10=" }, "infinity-agent": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/infinity-agent/-/infinity-agent-1.0.2.tgz", - "integrity": "sha1-Lp2iwHC5hkqLxmwBlOF5HtgFgCU=", - "dev": true + "integrity": "sha1-Lp2iwHC5hkqLxmwBlOF5HtgFgCU=" }, "inflight": { "version": "1.0.6", @@ -2760,7 +9223,6 @@ "version": "1.3.2", "resolved": "https://registry.npmjs.org/ink-docstrap/-/ink-docstrap-1.3.2.tgz", "integrity": 
"sha512-STx5orGQU1gfrkoI/fMU7lX6CSP7LBGO10gXNgOZhwKhUqbtNjCkYSewJtNnLmWP1tAGN6oyEpG1HFPw5vpa5Q==", - "dev": true, "requires": { "moment": "^2.14.1", "sanitize-html": "^1.13.0" @@ -2770,7 +9232,6 @@ "version": "0.6.2", "resolved": "https://registry.npmjs.org/inline-source-map/-/inline-source-map-0.6.2.tgz", "integrity": "sha1-+Tk0ccGKedFyT4Y/o4tYY3Ct4qU=", - "dev": true, "requires": { "source-map": "~0.5.3" } @@ -2779,13 +9240,12 @@ "version": "7.2.0", "resolved": "https://registry.npmjs.org/insert-module-globals/-/insert-module-globals-7.2.0.tgz", "integrity": "sha512-VE6NlW+WGn2/AeOMd496AHFYmE7eLKkUY6Ty31k4og5vmA3Fjuwe9v6ifH6Xx/Hz27QvdoMoviw1/pqWRB09Sw==", - "dev": true, "requires": { - "JSONStream": "^1.0.3", "acorn-node": "^1.5.2", "combine-source-map": "^0.8.0", "concat-stream": "^1.6.1", "is-buffer": "^1.1.0", + "JSONStream": "^1.0.3", "path-is-absolute": "^1.0.1", "process": "~0.11.0", "through2": "^2.0.0", @@ -2796,20 +9256,17 @@ "invert-kv": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", - "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", - "dev": true + "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=" }, "ipaddr.js": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "dev": true + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" }, "is-accessor-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dev": true, "requires": { "kind-of": "^3.0.2" } @@ -2818,7 +9275,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", - "dev": true, "requires": { 
"binary-extensions": "^1.0.0" } @@ -2826,14 +9282,12 @@ "is-buffer": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", - "dev": true + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" }, "is-data-descriptor": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dev": true, "requires": { "kind-of": "^3.0.2" } @@ -2842,7 +9296,6 @@ "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dev": true, "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -2852,22 +9305,19 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "dev": true + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" } } }, "is-dotfile": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", - "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=", - "dev": true + "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=" }, "is-equal-shallow": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", - "dev": true, "requires": { "is-primitive": "^2.0.0" } @@ -2875,20 +9325,17 @@ "is-extendable": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": 
"sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", - "dev": true + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=" }, "is-extglob": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", - "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=", - "dev": true + "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=" }, "is-fullwidth-code-point": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, "requires": { "number-is-nan": "^1.0.0" } @@ -2897,7 +9344,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", - "dev": true, "requires": { "is-extglob": "^1.0.0" } @@ -2906,7 +9352,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", - "dev": true, "requires": { "kind-of": "^3.0.2" } @@ -2915,7 +9360,6 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "dev": true, "requires": { "isobject": "^3.0.1" }, @@ -2923,46 +9367,39 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, "is-posix-bracket": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", - "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=", - "dev": true + "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=" }, "is-primitive": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", - "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=", - 
"dev": true + "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=" }, "is-stream": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "dev": true + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" }, "is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", - "dev": true + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" }, "isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" }, "isobject": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dev": true, "requires": { "isarray": "1.0.0" } @@ -2970,26 +9407,22 @@ "isstream": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", - "dev": true + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" }, "jju": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/jju/-/jju-1.2.1.tgz", - "integrity": "sha1-7fbsINXWaMgMLADOpj+KlCKktSg=", - "dev": true + "integrity": "sha1-7fbsINXWaMgMLADOpj+KlCKktSg=" }, "jmespath": { "version": "0.15.0", "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", - "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=", - "dev": true + "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" }, "js-yaml": { "version": "3.14.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", - "dev": 
true, "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -2999,7 +9432,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-4.0.1.tgz", "integrity": "sha512-KrPTolcw6RocpYjdC7pL7v62e55q7qOMHvLX1UCLc5AAS8qeJ6nukarEJAF2KL2PZxlbGueEbINqZR2bDe/gUw==", - "dev": true, "requires": { "xmlcreate": "^2.0.3" } @@ -3008,7 +9440,6 @@ "version": "3.6.3", "resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-3.6.3.tgz", "integrity": "sha512-Yf1ZKA3r9nvtMWHO1kEuMZTlHOF8uoQ0vyo5eH7SQy5YeIiHM+B0DgKnn+X6y6KDYZcF7G2SPkKF+JORCXWE/A==", - "dev": true, "requires": { "@babel/parser": "^7.4.4", "bluebird": "^3.5.4", @@ -3029,14 +9460,12 @@ "escape-string-regexp": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "dev": true + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==" }, "marked": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/marked/-/marked-0.7.0.tgz", - "integrity": "sha512-c+yYdCZJQrsRjTPhUx7VKkApw9bwDkNbHUKo1ovgcfDjb2kc8rLuRbIFyXL5WOEUwzSSKo3IXpph2K6DqB/KZg==", - "dev": true + "integrity": "sha512-c+yYdCZJQrsRjTPhUx7VKkApw9bwDkNbHUKo1ovgcfDjb2kc8rLuRbIFyXL5WOEUwzSSKo3IXpph2K6DqB/KZg==" } } }, @@ -3044,7 +9473,6 @@ "version": "3.3.1", "resolved": "https://registry.npmjs.org/json-schema-ref-parser/-/json-schema-ref-parser-3.3.1.tgz", "integrity": "sha512-stQTMhec2R/p2L9dH4XXRlpNCP0mY8QrLd/9Kl+8SHJQmwHtE1nDfXH4wbsSM+GkJMl8t92yZbI0OIol432CIQ==", - "dev": true, "requires": { "call-me-maybe": "^1.0.1", "debug": "^3.0.0", @@ -3057,8 +9485,7 @@ "es6-promise": { "version": "4.2.8", "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==", - "dev": true 
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" } } }, @@ -3066,7 +9493,6 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-0.0.1.tgz", "integrity": "sha1-YRwj6BTbN1Un34URk9tZ3Sryf0U=", - "dev": true, "requires": { "jsonify": "~0.0.0" } @@ -3074,20 +9500,26 @@ "jsonify": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", - "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=", - "dev": true + "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=" }, "jsonparse": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", - "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=", - "dev": true + "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=" + }, + "JSONStream": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", + "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", + "requires": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + } }, "jsonwebtoken": { "version": "8.3.0", "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.3.0.tgz", "integrity": "sha512-oge/hvlmeJCH+iIz1DwcO7vKPkNGJHhgkspk8OH3VKlw+mbi42WtD4ig1+VXRln765vxptAv+xT26Fd3cteqag==", - "dev": true, "requires": { "jws": "^3.1.5", "lodash.includes": "^4.3.0", @@ -3104,7 +9536,6 @@ "version": "1.4.1", "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", - "dev": true, "requires": { "buffer-equal-constant-time": "1.0.1", "ecdsa-sig-formatter": "1.0.11", @@ -3115,7 +9546,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", - "dev": true, 
"requires": { "jwa": "^1.4.1", "safe-buffer": "^5.0.1" @@ -3125,7 +9555,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -3134,7 +9563,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/klaw/-/klaw-3.0.0.tgz", "integrity": "sha512-0Fo5oir+O9jnXu5EefYbVK+mHMBeEVEy2cmctR1O1NECcCkPRreJKrS6Qt/j3KC2C148Dfo9i3pCmCMsdqGr0g==", - "dev": true, "requires": { "graceful-fs": "^4.1.9" } @@ -3143,7 +9571,6 @@ "version": "0.0.0", "resolved": "https://registry.npmjs.org/kuler/-/kuler-0.0.0.tgz", "integrity": "sha1-tmu0a5NOVQ9Z2BiEjgq7pPf1VTw=", - "dev": true, "requires": { "colornames": "0.0.2" } @@ -3152,7 +9579,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/labeled-stream-splicer/-/labeled-stream-splicer-2.0.2.tgz", "integrity": "sha512-Ca4LSXFFZUjPScRaqOcFxneA0VpKZr4MMYCljyQr4LIewTLb3Y0IUTIsnBBsVubIeEfxeSZpSjSsRM8APEQaAw==", - "dev": true, "requires": { "inherits": "^2.0.1", "stream-splicer": "^2.0.0" @@ -3162,7 +9588,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.0.tgz", "integrity": "sha1-9plf4PggOS9hOWvolGJAe7dxaOQ=", - "dev": true, "requires": { "readable-stream": "^2.0.5" } @@ -3171,7 +9596,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", - "dev": true, "requires": { "invert-kv": "^1.0.0" } @@ -3180,7 +9604,6 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==", - "dev": true, "requires": { "uc.micro": "^1.0.1" } @@ -3188,80 +9611,67 @@ "lodash": { "version": "4.17.19", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", - "integrity": 
"sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==", - "dev": true + "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" }, "lodash.get": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true + "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=" }, "lodash.includes": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", - "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=", - "dev": true + "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=" }, "lodash.isboolean": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", - "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=", - "dev": true + "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=" }, "lodash.isequal": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", - "integrity": "sha1-QVxEePK8wwEgwizhDtMib30+GOA=", - "dev": true + "integrity": "sha1-QVxEePK8wwEgwizhDtMib30+GOA=" }, "lodash.isinteger": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", - "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=", - "dev": true + "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=" }, "lodash.isnumber": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", - "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=", - "dev": true + "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=" }, "lodash.isplainobject": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", - "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=", - "dev": true + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" }, "lodash.isstring": { "version": 
"4.0.1", "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", - "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=", - "dev": true + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" }, "lodash.memoize": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-3.0.4.tgz", - "integrity": "sha1-LcvSwofLwKVcxCMovQxzYVDVPj8=", - "dev": true + "integrity": "sha1-LcvSwofLwKVcxCMovQxzYVDVPj8=" }, "lodash.once": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=", - "dev": true + "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=" }, "lowercase-keys": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "dev": true + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" }, "lru-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/lru-queue/-/lru-queue-0.1.0.tgz", "integrity": "sha1-Jzi9nw089PhEkMVzbEhpmsYyzaM=", - "dev": true, "requires": { "es5-ext": "~0.10.2" } @@ -3269,14 +9679,12 @@ "map-cache": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", - "dev": true + "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=" }, "map-visit": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", - "dev": true, "requires": { "object-visit": "^1.0.0" } @@ -3285,7 +9693,6 @@ "version": "8.4.2", "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz", "integrity": "sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==", - "dev": true, "requires": 
{ "argparse": "^1.0.7", "entities": "~1.1.1", @@ -3297,8 +9704,7 @@ "entities": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==", - "dev": true + "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" } } }, @@ -3306,25 +9712,22 @@ "version": "5.3.0", "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz", "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==", - "dev": true + "requires": {} }, "marked": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/marked/-/marked-1.1.1.tgz", - "integrity": "sha512-mJzT8D2yPxoPh7h0UXkB+dBj4FykPJ2OIfxAWeIHrvoHDkFxukV/29QxoFQoPM6RLEwhIFdJpmKBlqVM3s2ZIw==", - "dev": true + "integrity": "sha512-mJzT8D2yPxoPh7h0UXkB+dBj4FykPJ2OIfxAWeIHrvoHDkFxukV/29QxoFQoPM6RLEwhIFdJpmKBlqVM3s2ZIw==" }, "math-random": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.4.tgz", - "integrity": "sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A==", - "dev": true + "integrity": "sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A==" }, "md5.js": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "dev": true, "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1", @@ -3334,20 +9737,17 @@ "mdurl": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4=", - "dev": true + "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4=" }, "media-typer": { "version": "0.3.0", "resolved": 
"https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", - "dev": true + "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" }, "memoizee": { "version": "0.3.10", "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.3.10.tgz", "integrity": "sha1-TsoNiu057J0Bf0xcLy9kMvQuXI8=", - "dev": true, "requires": { "d": "~0.1.1", "es5-ext": "~0.10.11", @@ -3361,22 +9761,19 @@ "next-tick": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-0.2.2.tgz", - "integrity": "sha1-ddpKkn7liH45BliABltzNkE7MQ0=", - "dev": true + "integrity": "sha1-ddpKkn7liH45BliABltzNkE7MQ0=" } } }, "merge-descriptors": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=", - "dev": true + "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" }, "method-override": { "version": "2.3.10", "resolved": "https://registry.npmjs.org/method-override/-/method-override-2.3.10.tgz", "integrity": "sha1-49r41d7hDdLc59SuiNYrvud0drQ=", - "dev": true, "requires": { "debug": "2.6.9", "methods": "~1.1.2", @@ -3388,7 +9785,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -3396,22 +9792,19 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, "methods": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", - "dev": true + "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=" }, "micromatch": { "version": "2.3.11", "resolved": 
"https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", - "dev": true, "requires": { "arr-diff": "^2.0.0", "array-unique": "^0.2.1", @@ -3432,7 +9825,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", - "dev": true, "requires": { "bn.js": "^4.0.0", "brorand": "^1.0.1" @@ -3441,28 +9833,24 @@ "bn.js": { "version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, "mime": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "dev": true + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" }, "mime-db": { "version": "1.44.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", - "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", - "dev": true + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==" }, "mime-types": { "version": "2.1.27", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", - "dev": true, "requires": { "mime-db": "1.44.0" } @@ -3470,20 +9858,17 @@ "minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - 
"integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", - "dev": true + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" }, "minimalistic-crypto-utils": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=", - "dev": true + "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" }, "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, "requires": { "brace-expansion": "^1.1.7" } @@ -3491,14 +9876,12 @@ "minimist": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", - "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", - "dev": true + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" }, "minimize": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/minimize/-/minimize-2.0.0.tgz", "integrity": "sha1-emssOzrVlkidDUTX/QpvPi3krT8=", - "dev": true, "requires": { "argh": "~0.1.4", "async": "~2.0.0-rc.6", @@ -3513,7 +9896,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/async/-/async-2.0.1.tgz", "integrity": "sha1-twnMAoCpw28J9FNr6CPIOKkEniU=", - "dev": true, "requires": { "lodash": "^4.8.0" } @@ -3521,14 +9903,12 @@ "domelementtype": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==", - "dev": true + "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" }, "domhandler": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", 
"integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "dev": true, "requires": { "domelementtype": "1" } @@ -3537,7 +9917,6 @@ "version": "1.7.0", "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dev": true, "requires": { "dom-serializer": "0", "domelementtype": "1" @@ -3546,14 +9925,12 @@ "entities": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==", - "dev": true + "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" }, "htmlparser2": { "version": "3.9.2", "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.9.2.tgz", "integrity": "sha1-G9+HrMoPP55T+k/M6w9LTLsAszg=", - "dev": true, "requires": { "domelementtype": "^1.3.0", "domhandler": "^2.3.0", @@ -3566,8 +9943,7 @@ "node-uuid": { "version": "1.4.8", "resolved": "https://registry.npmjs.org/node-uuid/-/node-uuid-1.4.8.tgz", - "integrity": "sha1-sEDrCSOWivq/jTL7HxfxFn/auQc=", - "dev": true + "integrity": "sha1-sEDrCSOWivq/jTL7HxfxFn/auQc=" } } }, @@ -3575,7 +9951,6 @@ "version": "1.3.2", "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", - "dev": true, "requires": { "for-in": "^1.0.2", "is-extendable": "^1.0.1" @@ -3585,7 +9960,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dev": true, "requires": { "is-plain-object": "^2.0.4" } @@ -3596,7 +9970,6 @@ "version": "0.5.1", "resolved": 
"https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", - "dev": true, "requires": { "minimist": "0.0.8" } @@ -3641,9 +10014,7 @@ "version": "6.2.3", "resolved": "https://registry.npmjs.org/module-deps/-/module-deps-6.2.3.tgz", "integrity": "sha512-fg7OZaQBcL4/L+AK5f4iVqf9OMbCclXfy/znXRxTVhJSeW5AIlS9AwheYwDaXM3lVW7OBeaeUEY3gbaC6cLlSA==", - "dev": true, "requires": { - "JSONStream": "^1.0.3", "browser-resolve": "^2.0.0", "cached-path-relative": "^1.0.2", "concat-stream": "~1.6.0", @@ -3651,6 +10022,7 @@ "detective": "^5.2.0", "duplexer2": "^0.1.2", "inherits": "^2.0.1", + "JSONStream": "^1.0.3", "parents": "^1.0.0", "readable-stream": "^2.0.2", "resolve": "^1.4.0", @@ -3664,7 +10036,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-2.0.0.tgz", "integrity": "sha512-7sWsQlYL2rGLy2IWm8WL8DCTJvYLc/qlOnsakDac87SOoCd16WLsaAMdCiAqsTNHIe+SXfaqyxyo6THoWqs8WQ==", - "dev": true, "requires": { "resolve": "^1.17.0" } @@ -3674,20 +10045,17 @@ "moment": { "version": "2.27.0", "resolved": "https://registry.npmjs.org/moment/-/moment-2.27.0.tgz", - "integrity": "sha512-al0MUK7cpIcglMv3YF13qSgdAIqxHTO7brRtaz3DlSULbqfazqkc5kEjNrLDOM7fsjshoFIihnU8snrP7zUvhQ==", - "dev": true + "integrity": "sha512-al0MUK7cpIcglMv3YF13qSgdAIqxHTO7brRtaz3DlSULbqfazqkc5kEjNrLDOM7fsjshoFIihnU8snrP7zUvhQ==" }, "mongo-uri": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/mongo-uri/-/mongo-uri-0.1.2.tgz", - "integrity": "sha1-FzrwFAMzkALgq9C01nWYfTzc+Z4=", - "dev": true + "integrity": "sha1-FzrwFAMzkALgq9C01nWYfTzc+Z4=" }, "mongodb": { "version": "2.2.35", "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-2.2.35.tgz", "integrity": "sha512-3HGLucDg/8EeYMin3k+nFWChTA85hcYDCw1lPsWR6yV9A6RgKb24BkLiZ9ySZR+S0nfBjWoIUS7cyV6ceGx5Gg==", - "dev": true, "requires": { "es6-promise": "3.2.1", "mongodb-core": "2.1.19", @@ -3697,14 +10065,12 @@ "process-nextick-args": { "version": "1.0.7", "resolved": 
"https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", - "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=", - "dev": true + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=" }, "readable-stream": { "version": "2.2.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.2.7.tgz", "integrity": "sha1-BwV6y+JGeyIELTb5jFrVBwVOlbE=", - "dev": true, "requires": { "buffer-shims": "~1.0.0", "core-util-is": "~1.0.0", @@ -3719,7 +10085,6 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.0.3.tgz", "integrity": "sha512-4AH6Z5fzNNBcH+6XDMfA/BTt87skxqJlO0lAh3Dker5zThcAxG6mKz+iGu308UKoPPQ8Dcqx/4JhujzltRa+hQ==", - "dev": true, "requires": { "safe-buffer": "~5.1.0" } @@ -3730,7 +10095,6 @@ "version": "2.1.19", "resolved": "https://registry.npmjs.org/mongodb-core/-/mongodb-core-2.1.19.tgz", "integrity": "sha512-Jt4AtWUkpuW03kRdYGxga4O65O1UHlFfvvInslEfLlGi+zDMxbBe3J2NVmN9qPJ957Mn6Iz0UpMtV80cmxCVxw==", - "dev": true, "requires": { "bson": "~1.0.4", "require_optional": "~1.0.0" @@ -3739,14 +10103,12 @@ "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "msgpack-js": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/msgpack-js/-/msgpack-js-0.3.0.tgz", "integrity": "sha1-Aw7AjFlW+cp9F9QKVy1Tlv7BCSM=", - "dev": true, "requires": { "bops": "~0.0.6" } @@ -3755,7 +10117,6 @@ "version": "4.1.4", "resolved": "https://registry.npmjs.org/multiparty/-/multiparty-4.1.4.tgz", "integrity": "sha1-TJbcvcEeP3kX4WFeZAtLUCK+ZP0=", - "dev": true, "requires": { "fd-slicer": "~1.0.1", "safe-buffer": "5.1.2" @@ -3765,14 +10126,12 @@ "version": "2.14.1", "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", 
"integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==", - "dev": true, "optional": true }, "nanomatch": { "version": "1.2.13", "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", - "dev": true, "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -3790,40 +10149,34 @@ "arr-diff": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "dev": true + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=" }, "array-unique": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "dev": true + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=" }, "kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" } } }, "ncp": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ncp/-/ncp-2.0.0.tgz", - "integrity": "sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=", - "dev": true + "integrity": "sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=" }, "negotiator": { "version": "0.6.2", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", - "dev": true + "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==" }, "next-tick": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": 
"sha1-yobR/ogoFpsBICCOPchCS524NCw=", - "dev": true + "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" }, "node-gyp-build": { "version": "4.2.3", @@ -3833,14 +10186,12 @@ "nodemailer": { "version": "6.4.11", "resolved": "https://registry.npmjs.org/nodemailer/-/nodemailer-6.4.11.tgz", - "integrity": "sha512-BVZBDi+aJV4O38rxsUh164Dk1NCqgh6Cm0rQSb9SK/DHGll/DrCMnycVDD7msJgZCnmVa8ASo8EZzR7jsgTukQ==", - "dev": true + "integrity": "sha512-BVZBDi+aJV4O38rxsUh164Dk1NCqgh6Cm0rQSb9SK/DHGll/DrCMnycVDD7msJgZCnmVa8ASo8EZzR7jsgTukQ==" }, "normalize-path": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", - "dev": true, "requires": { "remove-trailing-separator": "^1.0.1" } @@ -3848,20 +10199,17 @@ "notepack.io": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-2.1.3.tgz", - "integrity": "sha512-AgSt+cP5XMooho1Ppn8NB3FFaVWefV+qZoZncYTUSch2GAEwlYLcIIbT5YVkMlFeNHnfwOvc4HDlbvrB5BRxXA==", - "dev": true + "integrity": "sha512-AgSt+cP5XMooho1Ppn8NB3FFaVWefV+qZoZncYTUSch2GAEwlYLcIIbT5YVkMlFeNHnfwOvc4HDlbvrB5BRxXA==" }, "number-is-nan": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" }, "nunjucks": { "version": "2.4.3", "resolved": "https://registry.npmjs.org/nunjucks/-/nunjucks-2.4.3.tgz", "integrity": "sha1-lhzLDzGABI7ptpzLboLBy5ReXs0=", - "dev": true, "requires": { "asap": "^2.0.3", "chokidar": "^1.0.0", @@ -3872,25 +10220,22 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/nunjucks-markdown/-/nunjucks-markdown-2.0.1.tgz", "integrity": "sha1-1V51Qzo1hQ4sNFZR/j+THtmxVqI=", - "dev": true + "requires": {} }, "object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": 
"sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" }, "object-component": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz", - "integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE=", - "dev": true + "integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE=" }, "object-copy": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", - "dev": true, "requires": { "copy-descriptor": "^0.1.0", "define-property": "^0.2.5", @@ -3901,7 +10246,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -3912,7 +10256,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", - "dev": true, "requires": { "isobject": "^3.0.0" }, @@ -3920,8 +10263,7 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, @@ -3929,7 +10271,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", - "dev": true, "requires": { "for-own": "^0.1.4", "is-extendable": "^0.1.1" @@ -3939,7 +10280,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", - "dev": true, "requires": { "isobject": "^3.0.1" }, @@ -3947,8 +10287,7 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + 
"integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, @@ -3956,7 +10295,6 @@ "version": "2.3.0", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "dev": true, "requires": { "ee-first": "1.1.1" } @@ -3964,8 +10302,7 @@ "on-headers": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "dev": true + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==" }, "once": { "version": "1.4.0", @@ -3979,7 +10316,6 @@ "version": "4.0.11", "resolved": "https://registry.npmjs.org/ono/-/ono-4.0.11.tgz", "integrity": "sha512-jQ31cORBFE6td25deYeD80wxKBMj+zBmHTrVxnc6CKhx8gho6ipmWM5zj/oeoqioZ99yqBls9Z/9Nss7J26G2g==", - "dev": true, "requires": { "format-util": "^1.0.3" } @@ -3987,14 +10323,12 @@ "os-browserify": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", - "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=", - "dev": true + "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" }, "os-locale": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", - "dev": true, "requires": { "lcid": "^1.0.0" } @@ -4003,7 +10337,6 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/os-name/-/os-name-1.0.3.tgz", "integrity": "sha1-GzefZINa98Wn9JizV8uVIVwVnt8=", - "dev": true, "requires": { "osx-release": "^1.0.0", "win-release": "^1.0.0" @@ -4013,7 +10346,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/osx-release/-/osx-release-1.1.0.tgz", "integrity": "sha1-8heRGigTaUmvG/kwiyQeJzfTzWw=", - "dev": true, "requires": { "minimist": "^1.1.0" }, @@ -4021,22 +10353,19 @@ "minimist": { "version": "1.2.5", "resolved": 
"https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" } } }, "pako": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", - "dev": true + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" }, "parents": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parents/-/parents-1.0.1.tgz", "integrity": "sha1-/t1NK/GTp3dF/nHjcdc8MwfZx1E=", - "dev": true, "requires": { "path-platform": "~0.11.15" } @@ -4045,7 +10374,6 @@ "version": "5.1.5", "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz", "integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==", - "dev": true, "requires": { "asn1.js": "^4.0.0", "browserify-aes": "^1.0.0", @@ -4059,7 +10387,6 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", "integrity": "sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", - "dev": true, "requires": { "glob-base": "^0.3.0", "is-dotfile": "^1.0.0", @@ -4070,14 +10397,12 @@ "parse-srcset": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/parse-srcset/-/parse-srcset-1.0.2.tgz", - "integrity": "sha1-8r0iH2zJcKk42IVWq8WJyqqiveE=", - "dev": true + "integrity": "sha1-8r0iH2zJcKk42IVWq8WJyqqiveE=" }, "parseqs": { "version": "0.0.5", "resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz", "integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=", - "dev": true, "requires": { "better-assert": "~1.0.0" } @@ -4086,7 +10411,6 @@ "version": "0.0.5", "resolved": 
"https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz", "integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=", - "dev": true, "requires": { "better-assert": "~1.0.0" } @@ -4094,20 +10418,17 @@ "parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "dev": true + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" }, "pascalcase": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", - "dev": true + "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=" }, "path-browserify": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", - "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==", - "dev": true + "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==" }, "path-is-absolute": { "version": "1.0.1", @@ -4117,26 +10438,22 @@ "path-parse": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", - "dev": true + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" }, "path-platform": { "version": "0.11.15", "resolved": "https://registry.npmjs.org/path-platform/-/path-platform-0.11.15.tgz", - "integrity": "sha1-6GQhf3TDaFDwhSt43Hv31KVyG/I=", - "dev": true + "integrity": "sha1-6GQhf3TDaFDwhSt43Hv31KVyG/I=" }, "path-to-regexp": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=", - "dev": 
true + "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" }, "pbkdf2": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.1.tgz", "integrity": "sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg==", - "dev": true, "requires": { "create-hash": "^1.1.2", "create-hmac": "^1.1.4", @@ -4148,26 +10465,22 @@ "pend": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", - "dev": true + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=" }, "pluralize": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-1.1.6.tgz", - "integrity": "sha1-5L9dazayr8IsgB98Nyk2F+C9xW0=", - "dev": true + "integrity": "sha1-5L9dazayr8IsgB98Nyk2F+C9xW0=" }, "posix-character-classes": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", - "dev": true + "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { "version": "7.0.32", "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.32.tgz", "integrity": "sha512-03eXong5NLnNCD05xscnGKGDZ98CyzoqPSMjOe6SuoQY7Z2hIj0Ld1g/O/UQRuOle2aRtiIRDg9tDcTGAkLfKw==", - "dev": true, "requires": { "chalk": "^2.4.2", "source-map": "^0.6.1", @@ -4177,14 +10490,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" }, "supports-color": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - 
"dev": true, "requires": { "has-flag": "^3.0.0" } @@ -4194,32 +10505,27 @@ "prepend-http": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", - "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=", - "dev": true + "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=" }, "preserve": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", - "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=", - "dev": true + "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=" }, "process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", - "dev": true + "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=" }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" }, "proxy-addr": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", - "dev": true, "requires": { "forwarded": "~0.1.2", "ipaddr.js": "1.9.1" @@ -4229,7 +10535,6 @@ "version": "4.0.3", "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", - "dev": true, "requires": { "bn.js": "^4.1.0", "browserify-rsa": "^4.0.0", @@ -4242,46 +10547,39 @@ "bn.js": { "version": "4.11.9", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": 
"sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==", - "dev": true + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" } } }, "punycode": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", - "dev": true + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" }, "q": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", - "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", - "dev": true + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=" }, "qs": { "version": "6.5.2", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "dev": true + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" }, "querystring": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "dev": true + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" }, "querystring-es3": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", - "dev": true + "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=" }, "raml-jsonschema-expander": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/raml-jsonschema-expander/-/raml-jsonschema-expander-1.1.2.tgz", "integrity": "sha1-nFagwcCpGQjUxBuiVu/EnryG3Uw=", - "dev": true, "requires": { "urllib-sync": "^1.1.0" } @@ -4290,7 +10588,6 @@ "version": "0.8.18", "resolved": "https://registry.npmjs.org/raml-parser/-/raml-parser-0.8.18.tgz", "integrity": "sha1-CHM3UDT4uKHSDBDjFHtiebM0l6g=", - "dev": true, "requires": { "got": "~2.4.0", "jju": "~1.2.0", @@ -4304,8 +10601,7 @@ 
"q": { "version": "0.9.7", "resolved": "https://registry.npmjs.org/q/-/q-0.9.7.tgz", - "integrity": "sha1-TeLmyzspCIyeTLwDv51C+5bOL3U=", - "dev": true + "integrity": "sha1-TeLmyzspCIyeTLwDv51C+5bOL3U=" } } }, @@ -4313,7 +10609,6 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/raml2html/-/raml2html-3.0.1.tgz", "integrity": "sha1-aqeaoJg+o8SgfwF3UceBOCmmUhg=", - "dev": true, "requires": { "commander": "2.9.x", "marked": "0.3.x", @@ -4328,7 +10623,6 @@ "version": "2.9.0", "resolved": "https://registry.npmjs.org/commander/-/commander-2.9.0.tgz", "integrity": "sha1-nJkJQXbhIkDLItbFFGCYQA/g99Q=", - "dev": true, "requires": { "graceful-readlink": ">= 1.0.0" } @@ -4336,8 +10630,7 @@ "marked": { "version": "0.3.19", "resolved": "https://registry.npmjs.org/marked/-/marked-0.3.19.tgz", - "integrity": "sha512-ea2eGWOqNxPcXv8dyERdSr/6FmzvWwzjMxpfGB/sbMccXoct+xY+YukPD+QTUZwyvK7BZwcr4m21WBOW41pAkg==", - "dev": true + "integrity": "sha512-ea2eGWOqNxPcXv8dyERdSr/6FmzvWwzjMxpfGB/sbMccXoct+xY+YukPD+QTUZwyvK7BZwcr4m21WBOW41pAkg==" } } }, @@ -4345,7 +10638,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/raml2obj/-/raml2obj-3.0.0.tgz", "integrity": "sha1-9w9XPGci4osw1C/bdUOkhoWR678=", - "dev": true, "requires": { "raml-parser": "0.8.x" } @@ -4354,7 +10646,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.1.tgz", "integrity": "sha512-TuDE5KxZ0J461RVjrJZCJc+J+zCkTb1MbH9AQUq68sMhOMcy9jLcb3BrZKgp9q9Ncltdg4QVqWrH02W2EFFVYw==", - "dev": true, "requires": { "is-number": "^4.0.0", "kind-of": "^6.0.0", @@ -4364,14 +10655,12 @@ "is-number": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", - "dev": true + "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==" }, "kind-of": { "version": "6.0.3", "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" } } }, @@ -4379,7 +10668,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dev": true, "requires": { "safe-buffer": "^5.1.0" } @@ -4388,7 +10676,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", - "dev": true, "requires": { "randombytes": "^2.0.5", "safe-buffer": "^5.1.0" @@ -4397,14 +10684,12 @@ "range-parser": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "dev": true + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" }, "raw-body": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz", "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==", - "dev": true, "requires": { "bytes": "3.0.0", "http-errors": "1.6.3", @@ -4415,14 +10700,12 @@ "read-all-stream": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/read-all-stream/-/read-all-stream-1.0.2.tgz", - "integrity": "sha1-03jPTvbiNrGI6kLRNeWxgKiePpI=", - "dev": true + "integrity": "sha1-03jPTvbiNrGI6kLRNeWxgKiePpI=" }, "read-only-stream": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/read-only-stream/-/read-only-stream-2.0.0.tgz", "integrity": 
"sha1-JyT9aoET1zdkrCiNQ4YnDB2/F/A=", - "dev": true, "requires": { "readable-stream": "^2.0.2" } @@ -4431,7 +10714,6 @@ "version": "2.3.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -4446,7 +10728,6 @@ "version": "2.2.1", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", - "dev": true, "requires": { "graceful-fs": "^4.1.11", "micromatch": "^3.1.10", @@ -4456,20 +10737,17 @@ "arr-diff": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "dev": true + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=" }, "array-unique": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "dev": true + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=" }, "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dev": true, "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -4487,7 +10765,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4498,7 +10775,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" 
} @@ -4507,7 +10783,6 @@ "version": "2.1.4", "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dev": true, "requires": { "debug": "^2.3.3", "define-property": "^0.2.5", @@ -4522,7 +10797,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -4531,7 +10805,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4540,7 +10813,6 @@ "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -4549,7 +10821,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -4560,7 +10831,6 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -4569,7 +10839,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -4580,7 +10849,6 @@ "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dev": true, "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -4590,8 +10858,7 @@ 
"kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "dev": true + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" } } }, @@ -4599,7 +10866,6 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dev": true, "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -4615,7 +10881,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, "requires": { "is-descriptor": "^1.0.0" } @@ -4624,7 +10889,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4635,7 +10899,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dev": true, "requires": { "extend-shallow": "^2.0.1", "is-number": "^3.0.0", @@ -4647,7 +10910,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4658,7 +10920,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -4667,7 +10928,6 @@ "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -4676,7 +10936,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -4687,7 +10946,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -4696,7 +10954,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -4706,20 +10963,17 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" }, "kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "dev": true, "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -4739,8 +10993,7 @@ "ms": { "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, @@ -4748,7 +11001,6 @@ "version": "2.8.0", "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz", "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==", - "dev": true, "requires": { "double-ended-queue": "^2.1.0-0", "redis-commands": "^1.2.0", @@ -4758,20 +11010,17 @@ "redis-commands": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.6.0.tgz", - "integrity": "sha512-2jnZ0IkjZxvguITjFTrGiLyzQZcTvaw8DAaCXxZq/dsHXz7KfMQ3OUJy7Tz9vnRtZRVz6VRCPDvruvU8Ts44wQ==", - "dev": true + "integrity": "sha512-2jnZ0IkjZxvguITjFTrGiLyzQZcTvaw8DAaCXxZq/dsHXz7KfMQ3OUJy7Tz9vnRtZRVz6VRCPDvruvU8Ts44wQ==" }, "redis-parser": { "version": "2.6.0", "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", - "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=", - "dev": true + "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=" }, "regex-cache": { "version": "0.4.4", "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", - "dev": true, "requires": { "is-equal-shallow": "^0.1.3" } @@ -4780,7 +11029,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", - "dev": true, "requires": { "extend-shallow": "^3.0.2", "safe-regex": "^1.1.0" @@ -4789,36 +11037,22 @@ "remove-trailing-separator": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", - "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", - "dev": true + "integrity": 
"sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" }, "repeat-element": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", - "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==", - "dev": true + "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==" }, "repeat-string": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", - "dev": true - }, - "require-uncached": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/require-uncached/-/require-uncached-1.0.3.tgz", - "integrity": "sha1-Tg1W1slmL9MeQwEcS5WqSZVUIdM=", - "dev": true, - "requires": { - "caller-path": "^0.1.0", - "resolve-from": "^1.0.0" - } + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=" }, "require_optional": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/require_optional/-/require_optional-1.0.1.tgz", "integrity": "sha512-qhM/y57enGWHAe3v/NcwML6a3/vfESLe/sGM2dII+gEO0BpKRUkWZow/tyloNqJyN6kXSl3RyyM8Ll5D/sJP8g==", - "dev": true, "requires": { "resolve-from": "^2.0.0", "semver": "^5.1.0" @@ -4827,28 +11061,33 @@ "resolve-from": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-2.0.0.tgz", - "integrity": "sha1-lICrIOlP+h2egKgEx+oUdhGWa1c=", - "dev": true + "integrity": "sha1-lICrIOlP+h2egKgEx+oUdhGWa1c=" } } }, + "require-uncached": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/require-uncached/-/require-uncached-1.0.3.tgz", + "integrity": "sha1-Tg1W1slmL9MeQwEcS5WqSZVUIdM=", + "requires": { + "caller-path": "^0.1.0", + "resolve-from": "^1.0.0" + } + }, "requirejs": { "version": "2.3.5", "resolved": "https://registry.npmjs.org/requirejs/-/requirejs-2.3.5.tgz", - "integrity": 
"sha512-svnO+aNcR/an9Dpi44C7KSAy5fFGLtmPbaaCeQaklUz8BQhS64tWWIIlvEA5jrWICzlO/X9KSzSeXFnZdBu8nw==", - "dev": true + "integrity": "sha512-svnO+aNcR/an9Dpi44C7KSAy5fFGLtmPbaaCeQaklUz8BQhS64tWWIIlvEA5jrWICzlO/X9KSzSeXFnZdBu8nw==" }, "requirejs-text": { "version": "2.0.15", "resolved": "https://registry.npmjs.org/requirejs-text/-/requirejs-text-2.0.15.tgz", - "integrity": "sha1-ExOHM2E/xEV7fhJH6Mt1HfeqVCk=", - "dev": true + "integrity": "sha1-ExOHM2E/xEV7fhJH6Mt1HfeqVCk=" }, "requizzle": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.3.tgz", "integrity": "sha512-YanoyJjykPxGHii0fZP0uUPEXpvqfBDxWV7s6GKAiiOsiqhX6vHNyW3Qzdmqp/iq/ExbhaGbVrjB4ruEVSM4GQ==", - "dev": true, "requires": { "lodash": "^4.17.14" } @@ -4857,7 +11096,6 @@ "version": "1.17.0", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz", "integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==", - "dev": true, "requires": { "path-parse": "^1.0.6" } @@ -4865,20 +11103,17 @@ "resolve-from": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-1.0.1.tgz", - "integrity": "sha1-Jsv+k10a7uq7Kbw/5a6wHpPUQiY=", - "dev": true + "integrity": "sha1-Jsv+k10a7uq7Kbw/5a6wHpPUQiY=" }, "resolve-url": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=", - "dev": true + "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=" }, "ret": { "version": "0.1.15", "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "dev": true + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==" }, "rimraf": { "version": "2.6.2", @@ -4915,7 +11150,6 @@ "version": "2.0.2", "resolved": 
"https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "dev": true, "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -4924,14 +11158,12 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, "safe-regex": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", - "dev": true, "requires": { "ret": "~0.1.10" } @@ -4939,14 +11171,12 @@ "safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sanitize-html": { "version": "1.27.2", "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-1.27.2.tgz", "integrity": "sha512-REZETvhFFChM3zyQS8XoR02j5U56HtyQkxsc8cb5HEi3XU0AAX9TuKvWe3ESR0F0IA81ZghA+5YpJg8C35AFyQ==", - "dev": true, "requires": { "htmlparser2": "^4.1.0", "lodash": "^4.17.15", @@ -4957,20 +11187,17 @@ "sax": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", - "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=", - "dev": true + "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" }, "semver": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": 
true + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" }, "send": { "version": "0.16.2", "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", - "dev": true, "requires": { "debug": "2.6.9", "depd": "~1.1.2", @@ -4991,7 +11218,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -4999,20 +11225,17 @@ "mime": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", - "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==", - "dev": true + "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" }, "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "statuses": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==", - "dev": true + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" } } }, @@ -5020,7 +11243,6 @@ "version": "1.13.2", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", - "dev": true, "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -5032,7 +11254,6 @@ "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", - "dev": true, "requires": { "extend-shallow": "^2.0.1", "is-extendable": "^0.1.1", @@ -5044,7 +11265,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -5054,14 +11274,12 @@ "setprototypeof": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", - "dev": true + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" }, "sha.js": { "version": "2.4.11", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "dev": true, "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -5071,7 +11289,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/shasum/-/shasum-1.0.2.tgz", "integrity": "sha1-5wEjENj0F/TetXEhUOVni4euVl8=", - "dev": true, "requires": { "json-stable-stringify": "~0.0.0", "sha.js": "~2.4.4" @@ -5081,7 +11298,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/shasum-object/-/shasum-object-1.0.0.tgz", "integrity": "sha512-Iqo5rp/3xVi6M4YheapzZhhGPVs0yZwHj7wvwQ1B9z8H6zk+FEnI7y3Teq7qwnekfEhu8WmG2z0z4iWZaxLWVg==", - "dev": true, "requires": { "fast-safe-stringify": "^2.0.7" } @@ -5089,20 +11305,17 @@ "shell-quote": { "version": "1.7.2", "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.7.2.tgz", - "integrity": "sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==", - "dev": true + 
"integrity": "sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg==" }, "simple-concat": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", - "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", - "dev": true + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==" }, "snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", - "dev": true, "requires": { "base": "^0.11.1", "debug": "^2.2.0", @@ -5118,7 +11331,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -5127,7 +11339,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -5136,7 +11347,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -5144,8 +11354,7 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, @@ -5153,7 +11362,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", - "dev": true, 
"requires": { "define-property": "^1.0.0", "isobject": "^3.0.0", @@ -5164,7 +11372,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, "requires": { "is-descriptor": "^1.0.0" } @@ -5173,7 +11380,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -5182,7 +11388,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, "requires": { "kind-of": "^6.0.0" } @@ -5191,7 +11396,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -5201,14 +11405,12 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" }, "kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==" } } }, @@ -5216,7 +11418,6 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", "integrity": 
"sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", - "dev": true, "requires": { "kind-of": "^3.2.0" } @@ -5225,7 +11426,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.1.1.tgz", "integrity": "sha512-rORqq9c+7W0DAK3cleWNSyfv/qKXV99hV4tZe+gGLfBECw3XEhBy7x85F3wypA9688LKjtwO9pX9L33/xQI8yA==", - "dev": true, "requires": { "debug": "~3.1.0", "engine.io": "~3.2.0", @@ -5238,14 +11438,12 @@ "socket.io-adapter": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.2.tgz", - "integrity": "sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g==", - "dev": true + "integrity": "sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g==" }, "socket.io-client": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.1.1.tgz", "integrity": "sha512-jxnFyhAuFxYfjqIgduQlhzqTcOEQSn+OHKVfAxWaNWa7ecP7xSNk2Dx/3UEsDcY7NcFafxvNvKPmmO7HTwTxGQ==", - "dev": true, "requires": { "backo2": "1.0.2", "base64-arraybuffer": "0.1.5", @@ -5266,8 +11464,7 @@ "component-emitter": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", - "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=", - "dev": true + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" } } }, @@ -5275,7 +11472,6 @@ "version": "3.2.0", "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.2.0.tgz", "integrity": "sha512-FYiBx7rc/KORMJlgsXysflWx/RIvtqZbyGLlHZvjfmPTPeuD/I8MaW7cfFrj5tRltICJdgwflhfZ3NVVbVLFQA==", - "dev": true, "requires": { "component-emitter": "1.2.1", "debug": "~3.1.0", @@ -5285,14 +11481,12 @@ "component-emitter": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", - "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=", - "dev": true + 
"integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" }, "isarray": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz", - "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=", - "dev": true + "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=" } } }, @@ -5300,7 +11494,6 @@ "version": "5.2.0", "resolved": "https://registry.npmjs.org/socket.io-redis/-/socket.io-redis-5.2.0.tgz", "integrity": "sha1-j+KtlEX8UIhvtwq8dZ1nQD1Ymd8=", - "dev": true, "requires": { "debug": "~2.6.8", "notepack.io": "~2.1.2", @@ -5313,7 +11506,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -5321,22 +11513,19 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, "source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=" }, "source-map-resolve": { "version": "0.5.3", "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", - "dev": true, "requires": { "atob": "^2.1.2", "decode-uri-component": "^0.2.0", @@ -5348,14 +11537,12 @@ "source-map-url": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", - "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=", - "dev": true + "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=" }, "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", "integrity": 
"sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", - "dev": true, "requires": { "extend-shallow": "^3.0.0" } @@ -5363,20 +11550,17 @@ "sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", - "dev": true + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" }, "stack-trace": { "version": "0.0.10", "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", - "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=", - "dev": true + "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" }, "static-extend": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", - "dev": true, "requires": { "define-property": "^0.2.5", "object-copy": "^0.1.0" @@ -5386,7 +11570,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -5396,14 +11579,12 @@ "statuses": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", - "dev": true + "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=" }, "stream-browserify": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", - "dev": true, "requires": { "inherits": "~2.0.1", "readable-stream": "^2.0.2" @@ -5413,7 +11594,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz", "integrity": "sha1-+02KFCDqNidk4hrUeAOXvry0HL4=", - "dev": true, "requires": { "duplexer2": "~0.1.0", "readable-stream": "^2.0.2" @@ -5423,7 +11603,6 @@ 
"version": "2.8.3", "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", - "dev": true, "requires": { "builtin-status-codes": "^3.0.0", "inherits": "^2.0.1", @@ -5435,44 +11614,39 @@ "stream-shift": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", - "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==", - "dev": true + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" }, "stream-splicer": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/stream-splicer/-/stream-splicer-2.0.1.tgz", "integrity": "sha512-Xizh4/NPuYSyAXyT7g8IvdJ9HJpxIGL9PjyhtywCZvvP0OPIdqyrr4dMikeuvY8xahpdKEBlBTySe583totajg==", - "dev": true, "requires": { "inherits": "^2.0.1", "readable-stream": "^2.0.2" } }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, "string-width": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, "requires": { "code-point-at": "^1.0.0", "is-fullwidth-code-point": "^1.0.0", "strip-ansi": "^3.0.0" } }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - }, "strip-ansi": { "version": "3.0.1", "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, "requires": { "ansi-regex": "^2.0.0" } @@ -5480,14 +11654,12 @@ "strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==" }, "subarg": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/subarg/-/subarg-1.0.0.tgz", "integrity": "sha1-9izxdYHplrSPyWVpn1TAauJouNI=", - "dev": true, "requires": { "minimist": "^1.1.0" }, @@ -5495,8 +11667,7 @@ "minimist": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" } } }, @@ -5504,7 +11675,6 @@ "version": "3.8.3", "resolved": "https://registry.npmjs.org/superagent/-/superagent-3.8.3.tgz", "integrity": "sha512-GLQtLMCoEIK4eDv6OGtkOoSMt3D+oq0y3dsxMuYuDvaNUvuT8eFBuLmfR0iYYzHC1e8hpzC6ZsxbuP6DIalMFA==", - "dev": true, "requires": { "component-emitter": "^1.2.0", "cookiejar": "^2.1.0", @@ -5522,7 +11692,6 @@ "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", - "dev": true, "requires": { "has-flag": "^3.0.0" } @@ -5531,7 +11700,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/syntax-error/-/syntax-error-1.4.0.tgz", "integrity": "sha512-YPPlu67mdnHGTup2A8ff7BC2Pjq0e0Yp/IyTFN03zWO0RcK07uLcbi7C2KpGR2FvWbaB0+bfE27a+sBKebSo7w==", - 
"dev": true, "requires": { "acorn-node": "^1.2.0" } @@ -5539,14 +11707,12 @@ "taffydb": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/taffydb/-/taffydb-2.6.2.tgz", - "integrity": "sha1-fLy2S1oUG2ou/CxdLGe04VCyomg=", - "dev": true + "integrity": "sha1-fLy2S1oUG2ou/CxdLGe04VCyomg=" }, "tar-stream": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", - "dev": true, "requires": { "bl": "^1.0.0", "buffer-alloc": "^1.2.0", @@ -5560,20 +11726,17 @@ "text-hex": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-0.0.0.tgz", - "integrity": "sha1-V4+8haapJjbkLdF7QdAhjM6esrM=", - "dev": true + "integrity": "sha1-V4+8haapJjbkLdF7QdAhjM6esrM=" }, "through": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", - "dev": true + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" }, "through2": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", - "dev": true, "requires": { "readable-stream": "~2.3.6", "xtend": "~4.0.1" @@ -5582,14 +11745,12 @@ "timed-out": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-2.0.0.tgz", - "integrity": "sha1-84sK6B03R9YoAB9B2vxlKs5nHAo=", - "dev": true + "integrity": "sha1-84sK6B03R9YoAB9B2vxlKs5nHAo=" }, "timers-browserify": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-1.4.2.tgz", "integrity": "sha1-ycWLV1voQHN1y14kYtrO50NZ9B0=", - "dev": true, "requires": { "process": "~0.11.0" } @@ -5598,7 +11759,6 @@ "version": "0.1.7", "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.7.tgz", "integrity": 
"sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==", - "dev": true, "requires": { "es5-ext": "~0.10.46", "next-tick": "1" @@ -5607,26 +11767,22 @@ "to-array": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/to-array/-/to-array-0.1.4.tgz", - "integrity": "sha1-F+bBH3PdTz10zaek/zI46a2b+JA=", - "dev": true + "integrity": "sha1-F+bBH3PdTz10zaek/zI46a2b+JA=" }, "to-arraybuffer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", - "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=", - "dev": true + "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=" }, "to-buffer": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", - "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==", - "dev": true + "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" }, "to-object-path": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", - "dev": true, "requires": { "kind-of": "^3.0.2" } @@ -5635,7 +11791,6 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", - "dev": true, "requires": { "define-property": "^2.0.2", "extend-shallow": "^3.0.2", @@ -5647,7 +11802,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", - "dev": true, "requires": { "is-number": "^3.0.0", "repeat-string": "^1.6.1" @@ -5657,7 +11811,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, 
"requires": { "kind-of": "^3.0.2" } @@ -5667,26 +11820,22 @@ "to-utf8": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/to-utf8/-/to-utf8-0.0.1.tgz", - "integrity": "sha1-0Xrqcv8vujm55DYBvns/9y4ImFI=", - "dev": true + "integrity": "sha1-0Xrqcv8vujm55DYBvns/9y4ImFI=" }, "toidentifier": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", - "dev": true + "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==" }, "tty-browserify": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.1.tgz", - "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==", - "dev": true + "integrity": "sha512-C3TaO7K81YvjCgQH9Q1S3R3P3BtN3RIM8n+OvX4il1K1zgE8ZhI0op7kClgkxtutIE8hQrcrHBXvIheqKUUCxw==" }, "type": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", - "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==", - "dev": true + "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" }, "type-detect": { "version": "1.0.0", @@ -5698,7 +11847,6 @@ "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dev": true, "requires": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -5707,38 +11855,32 @@ "typedarray": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", - "dev": true + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" }, "uc.micro": { "version": "1.0.6", "resolved": 
"https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==", - "dev": true + "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" }, "uid2": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/uid2/-/uid2-0.0.3.tgz", - "integrity": "sha1-SDEm4Rd03y9xuLY53NeZw3YWK4I=", - "dev": true + "integrity": "sha1-SDEm4Rd03y9xuLY53NeZw3YWK4I=" }, "ultron": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz", - "integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og==", - "dev": true + "integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og==" }, "umd": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/umd/-/umd-3.0.3.tgz", - "integrity": "sha512-4IcGSufhFshvLNcMCV80UnQVlZ5pMOC8mvNPForqwA4+lzYQuetTESLDQkeLmihq8bRcnpbQa48Wb8Lh16/xow==", - "dev": true + "integrity": "sha512-4IcGSufhFshvLNcMCV80UnQVlZ5pMOC8mvNPForqwA4+lzYQuetTESLDQkeLmihq8bRcnpbQa48Wb8Lh16/xow==" }, "undeclared-identifiers": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/undeclared-identifiers/-/undeclared-identifiers-1.1.3.tgz", "integrity": "sha512-pJOW4nxjlmfwKApE4zvxLScM/njmwj/DiUBv7EabwE4O8kRUy+HIwxQtZLBPll/jx1LJyBcqNfB3/cpv9EZwOw==", - "dev": true, "requires": { "acorn-node": "^1.3.0", "dash-ast": "^1.0.0", @@ -5750,14 +11892,12 @@ "underscore": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.9.1.tgz", - "integrity": "sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg==", - "dev": true + "integrity": "sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg==" }, "union-value": { "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", - "dev": true, "requires": { "arr-union": "^3.1.0", "get-value": "^2.0.6", @@ -5768,14 +11908,12 @@ "unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", - "dev": true + "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" }, "unset-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", - "dev": true, "requires": { "has-value": "^0.3.1", "isobject": "^3.0.0" @@ -5785,7 +11923,6 @@ "version": "0.3.1", "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", - "dev": true, "requires": { "get-value": "^2.0.3", "has-values": "^0.1.4", @@ -5796,7 +11933,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dev": true, "requires": { "isarray": "1.0.0" } @@ -5806,34 +11942,29 @@ "has-values": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", - "dev": true + "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=" }, "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" } } }, "uritemplate": { "version": "0.3.4", "resolved": "https://registry.npmjs.org/uritemplate/-/uritemplate-0.3.4.tgz", - "integrity": "sha1-BdCoU/+8iw9Jqj1NKtd3sNHuBww=", - "dev": true + "integrity": "sha1-BdCoU/+8iw9Jqj1NKtd3sNHuBww=" }, "urix": { "version": "0.1.0", "resolved": 
"https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=", - "dev": true + "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=" }, "url": { "version": "0.10.3", "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", - "dev": true, "requires": { "punycode": "1.3.2", "querystring": "0.2.0" @@ -5843,7 +11974,6 @@ "version": "2.11.1", "resolved": "https://registry.npmjs.org/urllib/-/urllib-2.11.1.tgz", "integrity": "sha1-5F1Xnxu+Qsn64hzf9yVo88jIyUU=", - "dev": true, "requires": { "any-promise": "^1.2.0", "debug": "^2.2.0", @@ -5859,7 +11989,6 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -5867,8 +11996,7 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } }, @@ -5876,7 +12004,6 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/urllib-sync/-/urllib-sync-1.1.4.tgz", "integrity": "sha1-yRMI9JkaZe5iDWc85g/dJLvRjMo=", - "dev": true, "requires": { "urllib": "~2.11.0", "utility": "~1.7.1" @@ -5885,14 +12012,12 @@ "use": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", - "dev": true + "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==" }, "util": { "version": "0.10.4", "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", - "dev": true, "requires": { "inherits": "2.0.3" } @@ -5900,14 
+12025,12 @@ "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" }, "utility": { "version": "1.7.1", "resolved": "https://registry.npmjs.org/utility/-/utility-1.7.1.tgz", "integrity": "sha1-+3TN3IFqQRJ2ym6MqZMkfyPusKc=", - "dev": true, "requires": { "copy-to": "~2.0.1", "escape-html": "~1.0.3" @@ -5916,38 +12039,32 @@ "utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", - "dev": true + "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" }, "uuid": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.1.0.tgz", - "integrity": "sha512-DIWtzUkw04M4k3bf1IcpS2tngXEL26YUD2M0tMDUpnUrz2hgzUBlD55a4FjdLGPvfHxS6uluGWvaVEqgBcVa+g==", - "dev": true + "integrity": "sha512-DIWtzUkw04M4k3bf1IcpS2tngXEL26YUD2M0tMDUpnUrz2hgzUBlD55a4FjdLGPvfHxS6uluGWvaVEqgBcVa+g==" }, "validator": { "version": "10.11.0", "resolved": "https://registry.npmjs.org/validator/-/validator-10.11.0.tgz", - "integrity": "sha512-X/p3UZerAIsbBfN/IwahhYaBbY68EN/UQBWHtsbXGT5bfrH/p4NQzUCG1kF/rtKaNpnJ7jAu6NGTdSNtyNIXMw==", - "dev": true + "integrity": "sha512-X/p3UZerAIsbBfN/IwahhYaBbY68EN/UQBWHtsbXGT5bfrH/p4NQzUCG1kF/rtKaNpnJ7jAu6NGTdSNtyNIXMw==" }, "vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "dev": true + "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" }, "vm-browserify": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", - "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==", - "dev": true + "integrity": 
"sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" }, "webgme": { "version": "2.42.0", "resolved": "https://registry.npmjs.org/webgme/-/webgme-2.42.0.tgz", "integrity": "sha512-QG6v+G4nERYyjstAw7V1h1gkIKW7DNrDLXedaQ3Bg6og3VGD74MujKm8V45YnqiDyq2a8kVu2uOjOKpKa42OvQ==", - "dev": true, "requires": { "bower": "1.8.8", "q": "1.5.1", @@ -5960,17 +12077,19 @@ "webgme-autoviz": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/webgme-autoviz/-/webgme-autoviz-2.2.1.tgz", - "integrity": "sha1-O7RGprOlAXCOsdf9upB3+M6wuvM=" + "integrity": "sha1-O7RGprOlAXCOsdf9upB3+M6wuvM=", + "requires": {} }, "webgme-easydag": { "version": "git+ssh://git@github.com/dfst/webgme-easydag.git#cb461f2687c8a2aa00adc827ea3688b7f0e24ada", - "from": "webgme-easydag@github:dfst/webgme-easydag" + "integrity": "sha512-FGlGEizf/rG4PLNDt511YrEAOXkJh5EFwE++UZsCP07qb2fUfDjBErMHsj8Awi5g/NK2JcgFpMn3y/EDY2Ff4g==", + "from": "webgme-easydag@github:dfst/webgme-easydag", + "requires": {} }, "webgme-engine": { "version": "2.25.1", "resolved": "https://registry.npmjs.org/webgme-engine/-/webgme-engine-2.25.1.tgz", "integrity": "sha512-yZreptu5SR/esPLGdqij6YDgW35qm7SfqITGrCkMtW+XtSm9OH2eM/7prG/iQo+Ge2i6f+2YZmCmOLRnXlvC3A==", - "dev": true, "requires": { "adm-zip": "0.4.11", "agentkeepalive": "3.4.1", @@ -6012,35 +12131,37 @@ "superagent": "3.8.3", "underscore": "1.9.1", "webgme-ot": "0.0.16", - "webgme-rust-components": "webgme-rust-components@github:webgme/webgme-rust-components", + "webgme-rust-components": "github:webgme/webgme-rust-components", "webgme-webhook-manager": "0.1.1", "winston": "2.4.3" } }, "webgme-json-importer": { "version": "git+ssh://git@github.com/deepforge-dev/webgme-json-importer.git#6e60149e2ec5ef946b3749bf194c893a1e72a424", - "from": "webgme-json-importer@github:deepforge-dev/webgme-json-importer" + "integrity": "sha512-TXoWmcTcvQaxcGtYYCE6xNAMPsbZRbtMDIdgcrGxssfelo+4kQ+cQBwdPVRbl6LrnU+PEJcHMNvLpJUZLU+/EQ==", + "from": 
"webgme-json-importer@github:deepforge-dev/webgme-json-importer", + "requires": {} }, "webgme-ot": { "version": "0.0.16", "resolved": "https://registry.npmjs.org/webgme-ot/-/webgme-ot-0.0.16.tgz", - "integrity": "sha512-Aict9Ka1tDDXZ9mZ9BX/4F3AV/KVf1qSoxK0UHtfxM1sPuGr3a4nXAhnccl/3jbHEjSHNphn71lmfff6y3+HmA==", - "dev": true + "integrity": "sha512-Aict9Ka1tDDXZ9mZ9BX/4F3AV/KVf1qSoxK0UHtfxM1sPuGr3a4nXAhnccl/3jbHEjSHNphn71lmfff6y3+HmA==" }, "webgme-rust-components": { "version": "git+ssh://git@github.com/webgme/webgme-rust-components.git#ad446234b6c02fd722e7e454015857ee523fb172", - "from": "webgme-rust-components@github:webgme/webgme-rust-components", - "dev": true + "integrity": "sha512-jS1rMUMLb7hXMhBheFxiTA+yLw068o3cheiFIH0vszWN8xGIjGXZn8YrzOmK2hkAu3HLF4lT7nuTYJ+uSZUtpA==", + "from": "webgme-rust-components@github:webgme/webgme-rust-components" }, "webgme-simple-nodes": { "version": "git+ssh://git@github.com/brollb/webgme-simple-nodes.git#2a9fc79c93efd55067ef7c2e1559f9b31b0f97e5", - "from": "webgme-simple-nodes@github:brollb/webgme-simple-nodes" + "integrity": "sha512-WhK6thWo5fns7zuN1EUSgX0qShOMMPIS/AIgrRMagt0ZR7dWeW9wLYwydM3dQ5tqtvpNY2GP9BZuuSGLd3V6BA==", + "from": "webgme-simple-nodes@github:brollb/webgme-simple-nodes", + "requires": {} }, "webgme-user-management-page": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/webgme-user-management-page/-/webgme-user-management-page-0.5.0.tgz", "integrity": "sha512-sk/sYFTiVf5ntGoNi/ZdZQsufG5UasnZCjThZW0IsW8WeuD0Njak6gVQwMLOid5XX2nLNpAmjGKSrowF0YyoLw==", - "dev": true, "requires": { "body-parser": "^1.19.0", "ejs": "^2.7.4", @@ -6051,7 +12172,6 @@ "version": "1.19.0", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", - "dev": true, "requires": { "bytes": "3.1.0", "content-type": "~1.0.4", @@ -6068,14 +12188,12 @@ "bytes": { "version": "3.1.0", "resolved": 
"https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "dev": true + "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" }, "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, "requires": { "ms": "2.0.0" } @@ -6083,14 +12201,12 @@ "ejs": { "version": "2.7.4", "resolved": "https://registry.npmjs.org/ejs/-/ejs-2.7.4.tgz", - "integrity": "sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA==", - "dev": true + "integrity": "sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA==" }, "http-errors": { "version": "1.7.2", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", - "dev": true, "requires": { "depd": "~1.1.2", "inherits": "2.0.3", @@ -6103,7 +12219,6 @@ "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dev": true, "requires": { "safer-buffer": ">= 2.1.2 < 3" } @@ -6111,20 +12226,17 @@ "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, "qs": { "version": "6.7.0", "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", - "dev": true + "integrity": 
"sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" }, "raw-body": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", - "dev": true, "requires": { "bytes": "3.1.0", "http-errors": "1.7.2", @@ -6135,8 +12247,7 @@ "setprototypeof": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==", - "dev": true + "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" } } }, @@ -6144,7 +12255,6 @@ "version": "0.1.1", "resolved": "https://registry.npmjs.org/webgme-webhook-manager/-/webgme-webhook-manager-0.1.1.tgz", "integrity": "sha512-04uqNF6sNRenJoJwHaLDlPr3516HyghbVInyc0BcQ0OIlH3Je1+YnHYGyFcPzOyWCbKYumoeRCBNkRrsJHLUnw==", - "dev": true, "requires": { "mongodb": "^2.1.18", "msgpack-js": "^0.3.0", @@ -6157,7 +12267,6 @@ "version": "2.6.2", "resolved": "https://registry.npmjs.org/redis/-/redis-2.6.2.tgz", "integrity": "sha1-fMqwVjATrGGefdhMZRK4HT2FJXk=", - "dev": true, "requires": { "double-ended-queue": "^2.1.0-0", "redis-commands": "^1.2.0", @@ -6170,7 +12279,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/win-release/-/win-release-1.1.1.tgz", "integrity": "sha1-X6VeAr58qTTt/BJmVjLoSbcuUgk=", - "dev": true, "requires": { "semver": "^5.0.1" } @@ -6178,14 +12286,12 @@ "window-size": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", - "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=", - "dev": true + "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=" }, "winston": { "version": "2.4.3", "resolved": "https://registry.npmjs.org/winston/-/winston-2.4.3.tgz", "integrity": 
"sha512-GYKuysPz2pxYAVJD2NPsDLP5Z79SDEzPm9/j4tCjkF/n89iBNGBMJcR+dMUqxgPNgoSs6fVygPi+Vl2oxIpBuw==", - "dev": true, "requires": { "async": "~1.0.0", "colors": "1.0.x", @@ -6198,8 +12304,7 @@ "async": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async/-/async-1.0.0.tgz", - "integrity": "sha1-+PwEyjoTeErenhZBr5hXjPvWR6k=", - "dev": true + "integrity": "sha1-+PwEyjoTeErenhZBr5hXjPvWR6k=" } } }, @@ -6207,7 +12312,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", - "dev": true, "requires": { "string-width": "^1.0.1", "strip-ansi": "^3.0.1" @@ -6222,7 +12326,6 @@ "version": "3.3.3", "resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz", "integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==", - "dev": true, "requires": { "async-limiter": "~1.0.0", "safe-buffer": "~5.1.0", @@ -6233,7 +12336,6 @@ "version": "0.4.17", "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.17.tgz", "integrity": "sha1-F76T6q4/O3eTWceVtBlwWogX6Gg=", - "dev": true, "requires": { "sax": ">=0.6.0", "xmlbuilder": "^4.1.0" @@ -6243,7 +12345,6 @@ "version": "4.2.1", "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-4.2.1.tgz", "integrity": "sha1-qlijBBoGb5DqoWwvU4n/GfP0YaU=", - "dev": true, "requires": { "lodash": "^4.0.0" } @@ -6251,32 +12352,27 @@ "xmlcreate": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.3.tgz", - "integrity": "sha512-HgS+X6zAztGa9zIK3Y3LXuJes33Lz9x+YyTxgrkIdabu2vqcGOWwdfCpf1hWLRrd553wd4QCDf6BBO6FfdsRiQ==", - "dev": true + "integrity": "sha512-HgS+X6zAztGa9zIK3Y3LXuJes33Lz9x+YyTxgrkIdabu2vqcGOWwdfCpf1hWLRrd553wd4QCDf6BBO6FfdsRiQ==" }, "xmlhttprequest-ssl": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.5.tgz", - "integrity": "sha1-wodrBhaKrcQOV9l+gRkayPQ5iz4=", - "dev": true + "integrity": 
"sha1-wodrBhaKrcQOV9l+gRkayPQ5iz4=" }, "xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "dev": true + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" }, "y18n": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", - "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=", - "dev": true + "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=" }, "yargs": { "version": "3.32.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz", "integrity": "sha1-AwiOnr+edWtpdRYR0qXvWRSCyZU=", - "dev": true, "requires": { "camelcase": "^2.0.1", "cliui": "^3.0.3", @@ -6290,14 +12386,12 @@ "yeast": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/yeast/-/yeast-0.1.2.tgz", - "integrity": "sha1-AI4G2AlDIMNy28L47XagymyKxBk=", - "dev": true + "integrity": "sha1-AI4G2AlDIMNy28L47XagymyKxBk=" }, "z-schema": { "version": "3.25.1", "resolved": "https://registry.npmjs.org/z-schema/-/z-schema-3.25.1.tgz", "integrity": "sha512-7tDlwhrBG+oYFdXNOjILSurpfQyuVgkRe3hB2q8TEssamDHB7BbLWYkYO98nTn0FibfdFroFKDjndbgufAgS/Q==", - "dev": true, "requires": { "commander": "^2.7.1", "core-js": "^2.5.7", @@ -6318,7 +12412,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/zip-stream/-/zip-stream-1.2.0.tgz", "integrity": "sha1-qLxF9MG0lpnGuQGYuqyqzbzUugQ=", - "dev": true, "requires": { "archiver-utils": "^1.3.0", "compress-commons": "^1.2.0", diff --git a/src/plugins/CreateKerasMeta/schemas/activations.json b/src/plugins/CreateKerasMeta/schemas/activations.json index adac19b..f9ab893 100644 --- a/src/plugins/CreateKerasMeta/schemas/activations.json +++ b/src/plugins/CreateKerasMeta/schemas/activations.json @@ -11,8 +11,8 @@ "default": 1.0 } ], - "docstring": "Exponential Linear Unit.\n\n The exponential linear unit (ELU) with 
`alpha > 0` is:\n `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`\n The ELU hyperparameter `alpha` controls the value to which an\n ELU saturates for negative net inputs. ELUs diminish the\n vanishing gradient effect.\n\n ELUs have negative values which pushes the mean of the activations\n closer to zero.\n Mean activations that are closer to zero enable faster learning as they\n bring the gradient closer to the natural gradient.\n ELUs saturate to a negative value when the argument gets smaller.\n Saturation means a small derivative which decreases the variation\n and the information that is propagated to the next layer.\n\n Example Usage:\n\n >>> import tensorflow as tf\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',\n ... input_shape=(28, 28, 1)))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n\n \n\n Arguments:\n x: Input tensor.\n alpha: A scalar, slope of negative section. `alpha` controls the value to\n which an ELU saturates for negative net inputs.\n\n Returns:\n The exponential linear unit (ELU) activation function: `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`.\n\n\n Reference:\n [Fast and Accurate Deep Network Learning by Exponential Linear Units\n (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Exponential Linear Unit.\n\n The exponential linear unit (ELU) with `alpha > 0` is:\n `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`\n The ELU hyperparameter `alpha` controls the value to which an\n ELU saturates for negative net inputs. 
ELUs diminish the\n vanishing gradient effect.\n\n ELUs have negative values which pushes the mean of the activations\n closer to zero.\n Mean activations that are closer to zero enable faster learning as they\n bring the gradient closer to the natural gradient.\n ELUs saturate to a negative value when the argument gets smaller.\n Saturation means a small derivative which decreases the variation\n and the information that is propagated to the next layer.\n\n Example Usage:\n\n >>> import tensorflow as tf\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',\n ... input_shape=(28, 28, 1)))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n\n \n\n Args:\n x: Input tensor.\n alpha: A scalar, slope of negative section. `alpha` controls the value to\n which an ELU saturates for negative net inputs.\n\n Returns:\n The exponential linear unit (ELU) activation function: `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`.\n\n\n Reference:\n [Fast and Accurate Deep Network Learning by Exponential Linear Units\n (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -23,8 +23,25 @@ "default": null } ], - "docstring": "Exponential activation function.\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.exponential(a)\n >>> b.numpy()\n array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n Tensor with exponential activation: `exp(x)`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Exponential activation function.\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n 
>>> b = tf.keras.activations.exponential(a)\n >>> b.numpy()\n array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n Tensor with exponential activation: `exp(x)`.\n ", + "file": "keras/activations.py", + "aliases": [] + }, + { + "name": "gelu", + "arguments": [ + { + "name": "x", + "default": null + }, + { + "name": "approximate", + "default": "False", + "type": "boolean" + } + ], + "docstring": "Applies the Gaussian error linear unit (GELU) activation function.\n\n Gaussian error linear unit (GELU) computes\n `x * P(X <= x)`, where `P(X) ~ N(0, 1)`.\n The (GELU) nonlinearity weights inputs by their value, rather than gates\n inputs by their sign as in ReLU.\n\n For example:\n\n >>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)\n >>> y = tf.keras.activations.gelu(x)\n >>> y.numpy()\n array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],\n dtype=float32)\n >>> y = tf.keras.activations.gelu(x, approximate=True)\n >>> y.numpy()\n array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],\n dtype=float32)\n\n Args:\n x: Input tensor.\n approximate: A `bool`, whether to enable approximation.\n\n Returns:\n The gaussian error linear activation:\n `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`\n if `approximate` is `True` or\n `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,\n where `P(X) ~ N(0, 1)`,\n if `approximate` is `False`.\n\n Reference:\n - [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -35,8 +52,8 @@ "default": null } ], - "docstring": "Hard sigmoid activation function.\n\n A faster approximation of the sigmoid activation.\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.hard_sigmoid(a)\n >>> b.numpy()\n array([0. , 0.3, 0.5, 0.7, 1. 
], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n The hard sigmoid activation, defined as:\n\n - `if x < -2.5: return 0`\n - `if x > 2.5: return 1`\n - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Hard sigmoid activation function.\n\n A faster approximation of the sigmoid activation.\n Piecewise linear approximation of the sigmoid function.\n Ref: 'https://en.wikipedia.org/wiki/Hard_sigmoid'\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.hard_sigmoid(a)\n >>> b.numpy()\n array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The hard sigmoid activation, defined as:\n\n - `if x < -2.5: return 0`\n - `if x > 2.5: return 1`\n - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -47,8 +64,8 @@ "default": null } ], - "docstring": "Linear activation function (pass-through).\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.linear(a)\n >>> b.numpy()\n array([-3., -1., 0., 1., 3.], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n The input, unmodified.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Linear activation function (pass-through).\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.linear(a)\n >>> b.numpy()\n array([-3., -1., 0., 1., 3.], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The input, unmodified.\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -71,8 +88,8 @@ "default": 0 } ], - "docstring": "Applies the rectified linear unit activation function.\n\n With default values, this returns the standard ReLU activation:\n `max(x, 0)`, the element-wise maximum of 0 and the input tensor.\n\n Modifying default parameters 
allows you to use non-zero thresholds,\n change the max value of the activation,\n and to use a non-zero multiple of the input for values below the threshold.\n\n For example:\n\n >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)\n >>> tf.keras.activations.relu(foo).numpy()\n array([ 0., 0., 0., 5., 10.], dtype=float32)\n >>> tf.keras.activations.relu(foo, alpha=0.5).numpy()\n array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32)\n >>> tf.keras.activations.relu(foo, max_value=5).numpy()\n array([0., 0., 0., 5., 5.], dtype=float32)\n >>> tf.keras.activations.relu(foo, threshold=5).numpy()\n array([-0., -0., 0., 0., 10.], dtype=float32)\n\n Arguments:\n x: Input `tensor` or `variable`.\n alpha: A `float` that governs the slope for values lower than the\n threshold.\n max_value: A `float` that sets the saturation threshold (the largest value\n the function will return).\n threshold: A `float` giving the threshold value of the activation function\n below which values will be damped or set to zero.\n\n Returns:\n A `Tensor` representing the input tensor,\n transformed by the relu activation function.\n Tensor will be of the same shape and dtype of input `x`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Applies the rectified linear unit activation function.\n\n With default values, this returns the standard ReLU activation:\n `max(x, 0)`, the element-wise maximum of 0 and the input tensor.\n\n Modifying default parameters allows you to use non-zero thresholds,\n change the max value of the activation,\n and to use a non-zero multiple of the input for values below the threshold.\n\n For example:\n\n >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)\n >>> tf.keras.activations.relu(foo).numpy()\n array([ 0., 0., 0., 5., 10.], dtype=float32)\n >>> tf.keras.activations.relu(foo, alpha=0.5).numpy()\n array([-5. , -2.5, 0. , 5. , 10. 
], dtype=float32)\n >>> tf.keras.activations.relu(foo, max_value=5).numpy()\n array([0., 0., 0., 5., 5.], dtype=float32)\n >>> tf.keras.activations.relu(foo, threshold=5).numpy()\n array([-0., -0., 0., 0., 10.], dtype=float32)\n\n Args:\n x: Input `tensor` or `variable`.\n alpha: A `float` that governs the slope for values lower than the\n threshold.\n max_value: A `float` that sets the saturation threshold (the largest value\n the function will return).\n threshold: A `float` giving the threshold value of the activation function\n below which values will be damped or set to zero.\n\n Returns:\n A `Tensor` representing the input tensor,\n transformed by the relu activation function.\n Tensor will be of the same shape and dtype of input `x`.\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -83,8 +100,8 @@ "default": null } ], - "docstring": "Scaled Exponential Linear Unit (SELU).\n\n The Scaled Exponential Linear Unit (SELU) activation function is defined as:\n\n - `if x > 0: return scale * x`\n - `if x < 0: return scale * alpha * (exp(x) - 1)`\n\n where `alpha` and `scale` are pre-defined constants\n (`alpha=1.67326324` and `scale=1.05070098`).\n\n Basically, the SELU activation function multiplies `scale` (> 1) with the\n output of the `tf.keras.activations.elu` function to ensure a slope larger\n than one for positive inputs.\n\n The values of `alpha` and `scale` are\n chosen so that the mean and variance of the inputs are preserved\n between two consecutive layers as long as the weights are initialized\n correctly (see `tf.keras.initializers.LecunNormal` initializer)\n and the number of input units is \"large enough\"\n (see reference paper for more information).\n\n Example Usage:\n\n >>> num_classes = 10 # 10-class problem\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',\n ... 
activation='selu'))\n >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n\n Arguments:\n x: A tensor or variable to compute the activation function for.\n\n Returns:\n The scaled exponential unit activation: `scale * elu(x, alpha)`.\n\n Notes:\n - To be used together with the\n `tf.keras.initializers.LecunNormal` initializer.\n - To be used together with the dropout variant\n `tf.keras.layers.AlphaDropout` (not regular dropout).\n\n References:\n - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Scaled Exponential Linear Unit (SELU).\n\n The Scaled Exponential Linear Unit (SELU) activation function is defined as:\n\n - `if x > 0: return scale * x`\n - `if x < 0: return scale * alpha * (exp(x) - 1)`\n\n where `alpha` and `scale` are pre-defined constants\n (`alpha=1.67326324` and `scale=1.05070098`).\n\n Basically, the SELU activation function multiplies `scale` (> 1) with the\n output of the `tf.keras.activations.elu` function to ensure a slope larger\n than one for positive inputs.\n\n The values of `alpha` and `scale` are\n chosen so that the mean and variance of the inputs are preserved\n between two consecutive layers as long as the weights are initialized\n correctly (see `tf.keras.initializers.LecunNormal` initializer)\n and the number of input units is \"large enough\"\n (see reference paper for more information).\n\n Example Usage:\n\n >>> num_classes = 10 # 10-class problem\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',\n ... 
activation='selu'))\n >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n\n Args:\n x: A tensor or variable to compute the activation function for.\n\n Returns:\n The scaled exponential unit activation: `scale * elu(x, alpha)`.\n\n Notes:\n - To be used together with the\n `tf.keras.initializers.LecunNormal` initializer.\n - To be used together with the dropout variant\n `tf.keras.layers.AlphaDropout` (not regular dropout).\n\n References:\n - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -95,8 +112,8 @@ "default": null } ], - "docstring": "Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.\n\n Applies the sigmoid activation function. For small values (<-5),\n `sigmoid` returns a value close to zero, and for large values (>5)\n the result of the function gets close to 1.\n\n Sigmoid is equivalent to a 2-element Softmax, where the second element is\n assumed to be zero. The sigmoid function always returns a value between\n 0 and 1.\n\n For example:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.sigmoid(a)\n >>> b.numpy()\n array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,\n 1.0000000e+00], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.\n\n Applies the sigmoid activation function. For small values (<-5),\n `sigmoid` returns a value close to zero, and for large values (>5)\n the result of the function gets close to 1.\n\n Sigmoid is equivalent to a 2-element Softmax, where the second element is\n assumed to be zero. 
The sigmoid function always returns a value between\n 0 and 1.\n\n For example:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.sigmoid(a)\n >>> b.numpy()\n array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,\n 1.0000000e+00], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -111,8 +128,8 @@ "default": -1 } ], - "docstring": "Softmax converts a real vector to a vector of categorical probabilities.\n\n The elements of the output vector are in range (0, 1) and sum to 1.\n\n Each vector is handled independently. The `axis` argument sets which axis\n of the input the function is applied along.\n\n Softmax is often used as the activation for the last\n layer of a classification network because the result could be interpreted as\n a probability distribution.\n\n The softmax of each vector x is computed as\n `exp(x) / tf.reduce_sum(exp(x))`.\n\n The input values in are the log-odds of the resulting probability.\n\n Arguments:\n x : Input tensor.\n axis: Integer, axis along which the softmax normalization is applied.\n\n Returns:\n Tensor, output of softmax transformation (all values are non-negative\n and sum to 1).\n\n Raises:\n ValueError: In case `dim(x) == 1`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Softmax converts a vector of values to a probability distribution.\n\n The elements of the output vector are in range (0, 1) and sum to 1.\n\n Each vector is handled independently. 
The `axis` argument sets which axis\n of the input the function is applied along.\n\n Softmax is often used as the activation for the last\n layer of a classification network because the result could be interpreted as\n a probability distribution.\n\n The softmax of each vector x is computed as\n `exp(x) / tf.reduce_sum(exp(x))`.\n\n The input values in are the log-odds of the resulting probability.\n\n Args:\n x : Input tensor.\n axis: Integer, axis along which the softmax normalization is applied.\n\n Returns:\n Tensor, output of softmax transformation (all values are non-negative\n and sum to 1).\n\n Examples:\n\n **Example 1: standalone usage**\n\n >>> inputs = tf.random.normal(shape=(32, 10))\n >>> outputs = tf.keras.activations.softmax(inputs)\n >>> tf.reduce_sum(outputs[0, :]) # Each sample in the batch now sums to 1\n \n\n **Example 2: usage in a `Dense` layer**\n\n >>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -123,8 +140,8 @@ "default": null } ], - "docstring": "Softplus activation function, `softplus(x) = log(exp(x) + 1)`.\n \n Example Usage:\n \n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.softplus(a) \n >>> b.numpy()\n array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,\n 2.0000000e+01], dtype=float32)\n \n Arguments:\n x: Input tensor.\n\n Returns:\n The softplus activation: `log(exp(x) + 1)`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Softplus activation function, `softplus(x) = log(exp(x) + 1)`.\n\n Example Usage:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.softplus(a)\n >>> b.numpy()\n array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,\n 2.0000000e+01], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The softplus activation: `log(exp(x) + 1)`.\n ", + "file": 
"keras/activations.py", "aliases": [] }, { @@ -135,8 +152,8 @@ "default": null } ], - "docstring": "Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.\n \n Example Usage:\n \n >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)\n >>> b = tf.keras.activations.softsign(a)\n >>> b.numpy()\n array([-0.5, 0. , 0.5], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n The softsign activation: `x / (abs(x) + 1)`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.\n\n Example Usage:\n\n >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)\n >>> b = tf.keras.activations.softsign(a)\n >>> b.numpy()\n array([-0.5, 0. , 0.5], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The softsign activation: `x / (abs(x) + 1)`.\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -147,8 +164,8 @@ "default": null } ], - "docstring": "Swish activation function, `swish(x) = x * sigmoid(x)`.\n\n Swish activation function which returns `x*sigmoid(x)`.\n It is a smooth, non-monotonic function that consistently matches\n or outperforms ReLU on deep networks, it is unbounded above and\n bounded below.\n\n\n Example Usage:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.swish(a)\n >>> b.numpy()\n array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,\n 2.0000000e+01], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n The swish activation applied to `x` (see reference paper for details).\n\n Reference:\n - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Swish activation function, `swish(x) = x * sigmoid(x)`.\n\n Swish activation function which returns `x*sigmoid(x)`.\n It is a smooth, non-monotonic function that consistently matches\n or outperforms ReLU on deep networks, it is unbounded above 
and\n bounded below.\n\n\n Example Usage:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.swish(a)\n >>> b.numpy()\n array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,\n 2.0000000e+01], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The swish activation applied to `x` (see reference paper for details).\n\n Reference:\n - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)\n ", + "file": "keras/activations.py", "aliases": [] }, { @@ -159,8 +176,8 @@ "default": null } ], - "docstring": "Hyperbolic tangent activation function.\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.tanh(a)\n >>> b.numpy()\n array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32)\n\n Arguments:\n x: Input tensor.\n\n Returns:\n Tensor of same shape and dtype of input `x`, with tanh activation:\n `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.\n ", - "file": "tensorflow/python/keras/activations.py", + "docstring": "Hyperbolic tangent activation function.\n\n For example:\n\n >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n >>> b = tf.keras.activations.tanh(a)\n >>> b.numpy()\n array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n Tensor of same shape and dtype of input `x`, with tanh activation:\n `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.\n ", + "file": "keras/activations.py", "aliases": [] } ] \ No newline at end of file diff --git a/src/plugins/CreateKerasMeta/schemas/constraints.json b/src/plugins/CreateKerasMeta/schemas/constraints.json index 15b54f4..fb5a0d8 100644 --- a/src/plugins/CreateKerasMeta/schemas/constraints.json +++ b/src/plugins/CreateKerasMeta/schemas/constraints.json @@ -2,7 +2,7 @@ { "name": "MaxNorm", "base": "Constraint", - "docstring": "MaxNorm weight 
constraint.\n\n Constrains the weights incident to each hidden unit\n to have a norm less than or equal to a desired value.\n\n Also available via the shortcut function `tf.keras.constraints.max_norm`.\n\n Arguments:\n max_value: the maximum norm value for the incoming weights.\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`.\n\n ", + "docstring": "MaxNorm weight constraint.\n\n Constrains the weights incident to each hidden unit\n to have a norm less than or equal to a desired value.\n\n Also available via the shortcut function `tf.keras.constraints.max_norm`.\n\n Args:\n max_value: the maximum norm value for the incoming weights.\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`.\n\n ", "arguments": [ { "name": "self", @@ -29,7 +29,7 @@ "default": null } ], - "file": "tensorflow/python/keras/constraints.py", + "file": "keras/constraints.py", "aliases": [ "max_norm" ] @@ -37,7 +37,7 @@ { "name": "MinMaxNorm", "base": "Constraint", - "docstring": "MinMaxNorm weight constraint.\n\n Constrains the weights incident to each hidden unit\n to have the norm between a lower bound and an upper bound.\n\n Also available via 
the shortcut function `tf.keras.constraints.min_max_norm`.\n\n Arguments:\n min_value: the minimum norm for the incoming weights.\n max_value: the maximum norm for the incoming weights.\n rate: rate for enforcing the constraint: weights will be\n rescaled to yield\n `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.\n Effectively, this means that rate=1.0 stands for strict\n enforcement of the constraint, while rate<1.0 means that\n weights will be rescaled at each step to slowly move\n towards a value inside the desired interval.\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`.\n ", + "docstring": "MinMaxNorm weight constraint.\n\n Constrains the weights incident to each hidden unit\n to have the norm between a lower bound and an upper bound.\n\n Also available via the shortcut function `tf.keras.constraints.min_max_norm`.\n\n Args:\n min_value: the minimum norm for the incoming weights.\n max_value: the maximum norm for the incoming weights.\n rate: rate for enforcing the constraint: weights will be\n rescaled to yield\n `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.\n Effectively, this means that rate=1.0 stands for strict\n enforcement of the constraint, while rate<1.0 means that\n weights will be rescaled at each step to slowly move\n towards a value inside the desired interval.\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length 
`(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`.\n ", "arguments": [ { "name": "self", @@ -72,7 +72,7 @@ "default": null } ], - "file": "tensorflow/python/keras/constraints.py", + "file": "keras/constraints.py", "aliases": [ "min_max_norm" ] @@ -94,7 +94,7 @@ "default": null } ], - "file": "tensorflow/python/keras/constraints.py", + "file": "keras/constraints.py", "aliases": [ "non_neg" ] @@ -116,7 +116,7 @@ "default": null } ], - "file": "tensorflow/python/keras/constraints.py", + "file": "keras/constraints.py", "aliases": [ "radial_constraint" ] @@ -124,7 +124,7 @@ { "name": "UnitNorm", "base": "Constraint", - "docstring": "Constrains the weights incident to each hidden unit to have unit norm.\n\n Also available via the shortcut function `tf.keras.constraints.unit_norm`.\n\n Arguments:\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`.\n ", + "docstring": "Constrains the weights incident to each hidden unit to have unit norm.\n\n Also available via the shortcut function `tf.keras.constraints.unit_norm`.\n\n Args:\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has 
shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`.\n ", "arguments": [ { "name": "self", @@ -147,7 +147,7 @@ "default": null } ], - "file": "tensorflow/python/keras/constraints.py", + "file": "keras/constraints.py", "aliases": [ "unit_norm" ] diff --git a/src/plugins/CreateKerasMeta/schemas/initializers.json b/src/plugins/CreateKerasMeta/schemas/initializers.json index 06c9589..fcce913 100644 --- a/src/plugins/CreateKerasMeta/schemas/initializers.json +++ b/src/plugins/CreateKerasMeta/schemas/initializers.json @@ -29,14 +29,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "constant" ] }, { "name": "GlorotNormal", - "base": "VarianceScaling", + "base": "Initializer", "docstring": "The Glorot normal initializer, also called Xavier normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.glorot_normal`.\n\n Draws samples from a truncated normal distribution centered on 0 with `stddev\n = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in\n the weight tensor and `fan_out` is the number of output units in the weight\n tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.GlorotNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.GlorotNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)\n ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))\n ", "arguments": [ { @@ -64,14 +64,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "glorot_normal" ] }, { "name": "GlorotUniform", - "base": "VarianceScaling", + "base": "Initializer", "docstring": "The Glorot uniform initializer, also called Xavier uniform initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.glorot_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`, where\n `limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units\n in the weight tensor and `fan_out` is the number of output units).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.GlorotUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.GlorotUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)\n ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))\n ", "arguments": [ { @@ -99,15 +99,15 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "glorot_uniform" ] }, { "name": "HeNormal", - "base": "VarianceScaling", - "docstring": "He normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.he_normal`.\n\n It draws samples from a truncated normal distribution centered on 0 with\n `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the\n weight tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.HeNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.HeNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Arguments:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n ", + "base": "Initializer", + "docstring": "He normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.he_normal`.\n\n It draws samples from a truncated normal distribution centered on 0 with\n `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the\n weight tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.HeNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.HeNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n ", "arguments": [ { "name": "self", @@ -134,15 +134,15 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "he_normal" ] }, { "name": "HeUniform", - "base": "VarianceScaling", - "docstring": "He uniform variance scaling initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.he_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`, where\n `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the\n weight tensor).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.HeUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.HeUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Arguments:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n ", + "base": "Initializer", + "docstring": "He uniform variance scaling initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.he_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`, where\n `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the\n weight tensor).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.HeUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.HeUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n ", "arguments": [ { "name": "self", @@ -169,14 +169,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "he_uniform" ] }, { "name": "Identity", - "base": "Initializer", + "base": "object", "docstring": "Initializer that generates the identity matrix.\n\n Also available via the shortcut function `tf.keras.initializers.identity`.\n\n Only usable for generating 2D matrices.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Identity()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Identity()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n gain: Multiplicative factor to apply to the identity matrix.\n ", "arguments": [ { @@ -204,15 +204,15 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "identity" ] }, { "name": "LecunNormal", - "base": "VarianceScaling", - "docstring": "Lecun normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.lecun_normal`.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Draws samples from a truncated normal distribution centered on 0 with `stddev\n = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the 
weight\n tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.LecunNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.LecunNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Arguments:\n seed: A Python integer. Used to seed the random generator.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al., 2017]\n (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)\n ([pdf]\n (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n ", + "base": "Initializer", + "docstring": "Lecun normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.lecun_normal`.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Draws samples from a truncated normal distribution centered on 0 with `stddev\n = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight\n tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.LecunNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.LecunNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
Used to seed the random generator.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al., 2017]\n (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)\n ([pdf]\n (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n ", "arguments": [ { "name": "self", @@ -239,15 +239,15 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "lecun_normal" ] }, { "name": "LecunUniform", - "base": "VarianceScaling", - "docstring": "Lecun uniform initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.lecun_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`,\n where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the\n weight tensor).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.LecunUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.LecunUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Arguments:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long\n ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n ", + "base": "Initializer", + "docstring": "Lecun uniform initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.lecun_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`,\n where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the\n weight tensor).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.LecunUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.LecunUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long\n ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n ", "arguments": [ { "name": "self", @@ -274,14 +274,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "lecun_uniform" ] }, { "name": "Ones", - "base": "Initializer", + "base": "object", "docstring": "Initializer that generates tensors initialized to 1.\n\n Also available via the shortcut function `tf.keras.initializers.ones`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Ones()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Ones()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n ", "arguments": null, "abstract": false, @@ -300,14 +300,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "ones" ] }, { "name": "Orthogonal", - "base": "Initializer", + "base": "object", "docstring": "Initializer that generates an orthogonal matrix.\n\n Also available via the shortcut function `tf.keras.initializers.orthogonal`.\n\n If the shape of the tensor to initialize is two-dimensional, it is initialized\n with an orthogonal matrix obtained from the QR decomposition of a matrix of\n random numbers drawn from a normal distribution.\n If the matrix has fewer rows than columns then the output will have orthogonal\n rows. 
Otherwise, the output will have orthogonal columns.\n\n If the shape of the tensor to initialize is more than two-dimensional,\n a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`\n is initialized, where `n` is the length of the shape vector.\n The matrix is subsequently reshaped to give a tensor of the desired shape.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Orthogonal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Orthogonal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n gain: multiplicative factor to apply to the orthogonal matrix\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)\n ([pdf](https://arxiv.org/pdf/1312.6120.pdf))\n ", "arguments": [ { @@ -339,14 +339,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "orthogonal" ] }, { "name": "RandomNormal", - "base": "Initializer", + "base": "object", "docstring": "Initializer that generates tensors with a normal distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.random_normal`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values to\n generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the random\n values to generate.\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n ", "arguments": [ { @@ -382,14 +382,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "random_normal" ] }, { "name": "RandomUniform", - "base": "Initializer", + "base": "object", "docstring": "Initializer that generates tensors with a uniform distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.random_uniform`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n minval: A python scalar or a scalar tensor. Lower bound of the range of\n random values to generate (inclusive).\n maxval: A python scalar or a scalar tensor. Upper bound of the range of\n random values to generate (exclusive).\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n ", "arguments": [ { @@ -425,7 +425,7 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "random_uniform", "uniform" @@ -433,8 +433,8 @@ }, { "name": "TruncatedNormal", - "base": "Initializer", - "docstring": "Initializer that generates a truncated normal distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.truncated_normal`.\n\n The values generated are similar to values from a\n `tf.keras.initializers.RandomNormal` initializer except that values more\n than two standard deviations from the mean are\n discarded and re-drawn.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate.\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n ", + "base": "object", + "docstring": "Initializer that generates a truncated normal distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.truncated_normal`.\n\n The values generated are similar to values from a\n `tf.keras.initializers.RandomNormal` initializer except that values more\n than two standard deviations from the mean are\n discarded and re-drawn.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate before truncation.\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n ", "arguments": [ { "name": "self", @@ -469,14 +469,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "truncated_normal" ] }, { "name": "VarianceScaling", - "base": "Initializer", + "base": "object", "docstring": "Initializer capable of adapting its scale to the shape of weights tensors.\n\n Also available via the shortcut function\n `tf.keras.initializers.variance_scaling`.\n\n With `distribution=\"truncated_normal\" or \"untruncated_normal\"`, samples are\n drawn from a truncated/untruncated normal distribution with a mean of zero and\n a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`,\n where `n` is:\n\n - number of input units in the weight tensor, if `mode=\"fan_in\"`\n - number of output units, if `mode=\"fan_out\"`\n - average of the numbers of input and output units, if `mode=\"fan_avg\"`\n\n With `distribution=\"uniform\"`, samples are drawn from a uniform distribution\n within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.VarianceScaling(\n ... scale=0.1, mode='fan_in', distribution='uniform')\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.VarianceScaling(\n ... scale=0.1, mode='fan_in', distribution='uniform')\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n scale: Scaling factor (positive float).\n mode: One of \"fan_in\", \"fan_out\", \"fan_avg\".\n distribution: Random distribution to use. One of \"truncated_normal\",\n \"untruncated_normal\" and \"uniform\".\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n ", "arguments": [ { @@ -516,14 +516,14 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "variance_scaling" ] }, { "name": "Zeros", - "base": "Initializer", + "base": "object", "docstring": "Initializer that generates tensors initialized to 0.\n\n Also available via the shortcut function `tf.keras.initializers.zeros`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Zeros()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Zeros()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n ", "arguments": null, "abstract": false, @@ -542,7 +542,7 @@ "default": "None" } ], - "file": "tensorflow/python/keras/initializers/initializers_v2.py", + "file": "keras/initializers/initializers_v2.py", "aliases": [ "zeros" ] diff --git a/src/plugins/CreateKerasMeta/schemas/layers.json b/src/plugins/CreateKerasMeta/schemas/layers.json index 46b3e88..3a32e57 100644 --- a/src/plugins/CreateKerasMeta/schemas/layers.json +++ b/src/plugins/CreateKerasMeta/schemas/layers.json @@ -2,18 +2,18 @@ { "name": "AbstractRNNCell", "base": "Layer", - "docstring": "Abstract object representing an RNN cell.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This is the base class for implementing RNN cells with custom behavior.\n\n Every `RNNCell` must have the properties below and implement `call` with\n the signature `(output, next_state) = call(input, state)`.\n\n Examples:\n\n ```python\n class MinimalRNNCell(AbstractRNNCell):\n\n def __init__(self, units, **kwargs):\n self.units = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n return 
self.units\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = K.dot(inputs, self.kernel)\n output = h + K.dot(prev_output, self.recurrent_kernel)\n return output, output\n ```\n\n This definition of cell differs from the definition used in the literature.\n In the literature, 'cell' refers to an object with a single scalar output.\n This definition refers to a horizontal array of such units.\n\n An RNN cell, in the most abstract setting, is anything that has\n a state and performs some operation that takes a matrix of inputs.\n This operation results in an output matrix with `self.output_size` columns.\n If `self.state_size` is an integer, this operation also results in a new\n state matrix with `self.state_size` columns. 
If `self.state_size` is a\n (possibly nested tuple of) TensorShape object(s), then it should return a\n matching structure of Tensors having shape `[batch_size].concatenate(s)`\n for each `s` in `self.batch_size`.\n ", + "docstring": "Abstract object representing an RNN cell.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This is the base class for implementing RNN cells with custom behavior.\n\n Every `RNNCell` must have the properties below and implement `call` with\n the signature `(output, next_state) = call(input, state)`.\n\n Examples:\n\n ```python\n class MinimalRNNCell(AbstractRNNCell):\n\n def __init__(self, units, **kwargs):\n self.units = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n return self.units\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = backend.dot(inputs, self.kernel)\n output = h + backend.dot(prev_output, self.recurrent_kernel)\n return output, output\n ```\n\n This definition of cell differs from the definition used in the literature.\n In the literature, 'cell' refers to an object with a single scalar output.\n This definition refers to a horizontal array of such units.\n\n An RNN cell, in the most abstract setting, is anything that has\n a state and performs some operation that takes a matrix of inputs.\n This operation results in an output matrix with `self.output_size` columns.\n If `self.state_size` is an integer, this operation also results in a new\n state matrix with `self.state_size` columns. 
If `self.state_size` is a\n (possibly nested tuple of) TensorShape object(s), then it should return a\n matching structure of Tensors having shape `[batch_size].concatenate(s)`\n for each `s` in `self.batch_size`.\n ", "arguments": null, "abstract": true, "outputs": [], "inputs": null, - "file": "tensorflow/python/keras/layers/recurrent.py", + "file": "keras/layers/recurrent.py", "aliases": [] }, { "name": "Activation", "base": "Layer", - "docstring": "Applies an activation function to an output.\n\n Arguments:\n activation: Activation function, such as `tf.nn.relu`, or string name of\n built-in activation function, such as \"relu\".\n\n Usage:\n\n >>> layer = tf.keras.layers.Activation('relu')\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n >>> layer = tf.keras.layers.Activation(tf.nn.relu)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", + "docstring": "Applies an activation function to an output.\n\n Args:\n activation: Activation function, such as `tf.nn.relu`, or string name of\n built-in activation function, such as \"relu\".\n\n Usage:\n\n >>> layer = tf.keras.layers.Activation('relu')\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n >>> layer = tf.keras.layers.Activation(tf.nn.relu)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", "arguments": [ { "name": "self", @@ -36,13 +36,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/core.py", "aliases": [] }, { "name": "ActivityRegularization", "base": "Layer", - "docstring": "Layer that applies an update to the cost function based input activity.\n\n Arguments:\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", + "docstring": "Layer that applies an update to the cost function based input activity.\n\n Args:\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", "arguments": [ { "name": "self", @@ -69,7 +69,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/core.py", "aliases": [] }, { @@ -94,13 +94,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": "keras/layers/merge.py", "aliases": [] }, { "name": "AdditiveAttention", "base": "BaseDenseAttention", - "docstring": "Additive attention layer, a.k.a. Bahdanau-style attention.\n\n Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of\n shape `[batch_size, Tv, dim]` and `key` tensor of shape\n `[batch_size, Tv, dim]`. The calculation follows the steps:\n\n 1. 
Reshape `query` and `value` into shapes `[batch_size, Tq, 1, dim]`\n and `[batch_size, 1, Tv, dim]` respectively.\n 2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear\n sum: `scores = tf.reduce_sum(tf.tanh(query + value), axis=-1)`\n 3. Use scores to calculate a distribution with shape\n `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.\n 4. Use `distribution` to create a linear combination of `value` with\n shape `batch_size, Tq, dim]`:\n `return tf.matmul(distribution, value)`.\n\n Args:\n use_scale: If `True`, will create a variable to scale the attention scores.\n causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such\n that position `i` cannot attend to positions `j > i`. This prevents the\n flow of information from the future towards the past.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n attention scores.\n\n Call Arguments:\n\n inputs: List of the following tensors:\n * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.\n * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not\n given, will use `value` for both `key` and `value`, which is the\n most common case.\n mask: List of the following tensors:\n * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.\n If given, the output will be zero at the positions where\n `mask==False`.\n * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.\n If given, will apply the mask such that values at positions where\n `mask==False` do not contribute to the result.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n\n Output shape:\n\n Attention outputs of shape `[batch_size, Tq, dim]`.\n\n The meaning of `query`, `value` and `key` depend on the application. 
In the\n case of text similarity, for example, `query` is the sequence embeddings of\n the first piece of text and `value` is the sequence embeddings of the second\n piece of text. `key` is usually the same tensor as `value`.\n\n Here is a code example for using `AdditiveAttention` in a CNN+Attention\n network:\n\n ```python\n # Variable-length int sequences.\n query_input = tf.keras.Input(shape=(None,), dtype='int32')\n value_input = tf.keras.Input(shape=(None,), dtype='int32')\n\n # Embedding lookup.\n token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)\n # Query embeddings of shape [batch_size, Tq, dimension].\n query_embeddings = token_embedding(query_input)\n # Value embeddings of shape [batch_size, Tv, dimension].\n value_embeddings = token_embedding(value_input)\n\n # CNN layer.\n cnn_layer = tf.keras.layers.Conv1D(\n filters=100,\n kernel_size=4,\n # Use 'same' padding so outputs have the same shape as inputs.\n padding='same')\n # Query encoding of shape [batch_size, Tq, filters].\n query_seq_encoding = cnn_layer(query_embeddings)\n # Value encoding of shape [batch_size, Tv, filters].\n value_seq_encoding = cnn_layer(value_embeddings)\n\n # Query-value attention of shape [batch_size, Tq, filters].\n query_value_attention_seq = tf.keras.layers.AdditiveAttention()(\n [query_seq_encoding, value_seq_encoding])\n\n # Reduce over the sequence axis to produce encodings of shape\n # [batch_size, filters].\n query_encoding = tf.keras.layers.GlobalAveragePooling1D()(\n query_seq_encoding)\n query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(\n query_value_attention_seq)\n\n # Concatenate query and document encodings to produce a DNN input layer.\n input_layer = tf.keras.layers.Concatenate()(\n [query_encoding, query_value_attention])\n\n # Add DNN layers, and create Model.\n # ...\n ```\n ", + "docstring": "Additive attention layer, a.k.a. 
Bahdanau-style attention.\n\n Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of\n shape `[batch_size, Tv, dim]` and `key` tensor of shape\n `[batch_size, Tv, dim]`. The calculation follows the steps:\n\n 1. Reshape `query` and `key` into shapes `[batch_size, Tq, 1, dim]`\n and `[batch_size, 1, Tv, dim]` respectively.\n 2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear\n sum: `scores = tf.reduce_sum(tf.tanh(query + key), axis=-1)`\n 3. Use scores to calculate a distribution with shape\n `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.\n 4. Use `distribution` to create a linear combination of `value` with\n shape `[batch_size, Tq, dim]`:\n `return tf.matmul(distribution, value)`.\n\n Args:\n use_scale: If `True`, will create a variable to scale the attention scores.\n causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such\n that position `i` cannot attend to positions `j > i`. This prevents the\n flow of information from the future towards the past.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n attention scores.\n\n Call Args:\n\n inputs: List of the following tensors:\n * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.\n * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. 
If not\n given, will use `value` for both `key` and `value`, which is the\n most common case.\n mask: List of the following tensors:\n * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.\n If given, the output will be zero at the positions where\n `mask==False`.\n * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.\n If given, will apply the mask such that values at positions where\n `mask==False` do not contribute to the result.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n return_attention_scores: bool, it `True`, returns the attention scores\n (after masking and softmax) as an additional output argument.\n\n Output:\n\n Attention outputs of shape `[batch_size, Tq, dim]`.\n [Optional] Attention scores after masking and softmax with shape\n `[batch_size, Tq, Tv]`.\n\n The meaning of `query`, `value` and `key` depend on the application. In the\n case of text similarity, for example, `query` is the sequence embeddings of\n the first piece of text and `value` is the sequence embeddings of the second\n piece of text. 
`key` is usually the same tensor as `value`.\n\n Here is a code example for using `AdditiveAttention` in a CNN+Attention\n network:\n\n ```python\n # Variable-length int sequences.\n query_input = tf.keras.Input(shape=(None,), dtype='int32')\n value_input = tf.keras.Input(shape=(None,), dtype='int32')\n\n # Embedding lookup.\n token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)\n # Query embeddings of shape [batch_size, Tq, dimension].\n query_embeddings = token_embedding(query_input)\n # Value embeddings of shape [batch_size, Tv, dimension].\n value_embeddings = token_embedding(value_input)\n\n # CNN layer.\n cnn_layer = tf.keras.layers.Conv1D(\n filters=100,\n kernel_size=4,\n # Use 'same' padding so outputs have the same shape as inputs.\n padding='same')\n # Query encoding of shape [batch_size, Tq, filters].\n query_seq_encoding = cnn_layer(query_embeddings)\n # Value encoding of shape [batch_size, Tv, filters].\n value_seq_encoding = cnn_layer(value_embeddings)\n\n # Query-value attention of shape [batch_size, Tq, filters].\n query_value_attention_seq = tf.keras.layers.AdditiveAttention()(\n [query_seq_encoding, value_seq_encoding])\n\n # Reduce over the sequence axis to produce encodings of shape\n # [batch_size, filters].\n query_encoding = tf.keras.layers.GlobalAveragePooling1D()(\n query_seq_encoding)\n query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(\n query_value_attention_seq)\n\n # Concatenate query and document encodings to produce a DNN input layer.\n input_layer = tf.keras.layers.Concatenate()(\n [query_encoding, query_value_attention])\n\n # Add DNN layers, and create Model.\n # ...\n ```\n ", "arguments": [ { "name": "self", @@ -130,15 +130,20 @@ { "name": "training", "default": "None" + }, + { + "name": "return_attention_scores", + "default": "False", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/dense_attention.py", + "file": "keras/layers/dense_attention.py", "aliases": [] }, { "name": 
"AlphaDropout", "base": "Layer", - "docstring": "Applies Alpha Dropout to the input.\n\n Alpha Dropout is a `Dropout` that keeps mean and variance of inputs\n to their original values, in order to ensure the self-normalizing property\n even after this dropout.\n Alpha Dropout fits well to Scaled Exponential Linear Units\n by randomly setting activations to the negative saturation value.\n\n Arguments:\n rate: float, drop probability (as with `Dropout`).\n The multiplicative noise will have\n standard deviation `sqrt(rate / (1 - rate))`.\n seed: A Python integer to use as random seed.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", + "docstring": "Applies Alpha Dropout to the input.\n\n Alpha Dropout is a `Dropout` that keeps mean and variance of inputs\n to their original values, in order to ensure the self-normalizing property\n even after this dropout.\n Alpha Dropout fits well to Scaled Exponential Linear Units\n by randomly setting activations to the negative saturation value.\n\n Args:\n rate: float, drop probability (as with `Dropout`).\n The multiplicative noise will have\n standard deviation `sqrt(rate / (1 - rate))`.\n seed: A Python integer to use as random seed.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", "arguments": [ { "name": "self", @@ -173,13 +178,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/noise.py", + "file": "keras/layers/noise.py", "aliases": [] }, { "name": "Attention", "base": "BaseDenseAttention", - "docstring": "Dot-product attention layer, a.k.a. Luong-style attention.\n\n Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of\n shape `[batch_size, Tv, dim]` and `key` tensor of shape\n `[batch_size, Tv, dim]`. The calculation follows the steps:\n\n 1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot\n product: `scores = tf.matmul(query, key, transpose_b=True)`.\n 2. Use scores to calculate a distribution with shape\n `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.\n 3. Use `distribution` to create a linear combination of `value` with\n shape `[batch_size, Tq, dim]`:\n `return tf.matmul(distribution, value)`.\n\n Args:\n use_scale: If `True`, will create a scalar variable to scale the attention\n scores.\n causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such\n that position `i` cannot attend to positions `j > i`. This prevents the\n flow of information from the future towards the past.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n attention scores.\n\n Call Arguments:\n\n inputs: List of the following tensors:\n * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.\n * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. 
If not\n given, will use `value` for both `key` and `value`, which is the\n most common case.\n mask: List of the following tensors:\n * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.\n If given, the output will be zero at the positions where\n `mask==False`.\n * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.\n If given, will apply the mask such that values at positions where\n `mask==False` do not contribute to the result.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n\n Output shape:\n\n Attention outputs of shape `[batch_size, Tq, dim]`.\n\n The meaning of `query`, `value` and `key` depend on the application. In the\n case of text similarity, for example, `query` is the sequence embeddings of\n the first piece of text and `value` is the sequence embeddings of the second\n piece of text. `key` is usually the same tensor as `value`.\n\n Here is a code example for using `Attention` in a CNN+Attention network:\n\n ```python\n # Variable-length int sequences.\n query_input = tf.keras.Input(shape=(None,), dtype='int32')\n value_input = tf.keras.Input(shape=(None,), dtype='int32')\n\n # Embedding lookup.\n token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)\n # Query embeddings of shape [batch_size, Tq, dimension].\n query_embeddings = token_embedding(query_input)\n # Value embeddings of shape [batch_size, Tv, dimension].\n value_embeddings = token_embedding(value_input)\n\n # CNN layer.\n cnn_layer = tf.keras.layers.Conv1D(\n filters=100,\n kernel_size=4,\n # Use 'same' padding so outputs have the same shape as inputs.\n padding='same')\n # Query encoding of shape [batch_size, Tq, filters].\n query_seq_encoding = cnn_layer(query_embeddings)\n # Value encoding of shape [batch_size, Tv, filters].\n value_seq_encoding = cnn_layer(value_embeddings)\n\n # Query-value attention of shape [batch_size, Tq, filters].\n 
query_value_attention_seq = tf.keras.layers.Attention()(\n [query_seq_encoding, value_seq_encoding])\n\n # Reduce over the sequence axis to produce encodings of shape\n # [batch_size, filters].\n query_encoding = tf.keras.layers.GlobalAveragePooling1D()(\n query_seq_encoding)\n query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(\n query_value_attention_seq)\n\n # Concatenate query and document encodings to produce a DNN input layer.\n input_layer = tf.keras.layers.Concatenate()(\n [query_encoding, query_value_attention])\n\n # Add DNN layers, and create Model.\n # ...\n ```\n ", + "docstring": "Dot-product attention layer, a.k.a. Luong-style attention.\n\n Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of\n shape `[batch_size, Tv, dim]` and `key` tensor of shape\n `[batch_size, Tv, dim]`. The calculation follows the steps:\n\n 1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot\n product: `scores = tf.matmul(query, key, transpose_b=True)`.\n 2. Use scores to calculate a distribution with shape\n `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.\n 3. Use `distribution` to create a linear combination of `value` with\n shape `[batch_size, Tq, dim]`:\n `return tf.matmul(distribution, value)`.\n\n Args:\n use_scale: If `True`, will create a scalar variable to scale the attention\n scores.\n causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such\n that position `i` cannot attend to positions `j > i`. This prevents the\n flow of information from the future towards the past.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n attention scores.\n\n Call Args:\n\n inputs: List of the following tensors:\n * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.\n * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.\n * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. 
If not\n given, will use `value` for both `key` and `value`, which is the\n most common case.\n mask: List of the following tensors:\n * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.\n If given, the output will be zero at the positions where\n `mask==False`.\n * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.\n If given, will apply the mask such that values at positions where\n `mask==False` do not contribute to the result.\n return_attention_scores: bool, it `True`, returns the attention scores\n (after masking and softmax) as an additional output argument.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n\n Output:\n\n Attention outputs of shape `[batch_size, Tq, dim]`.\n [Optional] Attention scores after masking and softmax with shape\n `[batch_size, Tq, Tv]`.\n\n The meaning of `query`, `value` and `key` depend on the application. In the\n case of text similarity, for example, `query` is the sequence embeddings of\n the first piece of text and `value` is the sequence embeddings of the second\n piece of text. 
`key` is usually the same tensor as `value`.\n\n Here is a code example for using `Attention` in a CNN+Attention network:\n\n ```python\n # Variable-length int sequences.\n query_input = tf.keras.Input(shape=(None,), dtype='int32')\n value_input = tf.keras.Input(shape=(None,), dtype='int32')\n\n # Embedding lookup.\n token_embedding = tf.keras.layers.Embedding(input_dim=1000, output_dim=64)\n # Query embeddings of shape [batch_size, Tq, dimension].\n query_embeddings = token_embedding(query_input)\n # Value embeddings of shape [batch_size, Tv, dimension].\n value_embeddings = token_embedding(value_input)\n\n # CNN layer.\n cnn_layer = tf.keras.layers.Conv1D(\n filters=100,\n kernel_size=4,\n # Use 'same' padding so outputs have the same shape as inputs.\n padding='same')\n # Query encoding of shape [batch_size, Tq, filters].\n query_seq_encoding = cnn_layer(query_embeddings)\n # Value encoding of shape [batch_size, Tv, filters].\n value_seq_encoding = cnn_layer(value_embeddings)\n\n # Query-value attention of shape [batch_size, Tq, filters].\n query_value_attention_seq = tf.keras.layers.Attention()(\n [query_seq_encoding, value_seq_encoding])\n\n # Reduce over the sequence axis to produce encodings of shape\n # [batch_size, filters].\n query_encoding = tf.keras.layers.GlobalAveragePooling1D()(\n query_seq_encoding)\n query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(\n query_value_attention_seq)\n\n # Concatenate query and document encodings to produce a DNN input layer.\n input_layer = tf.keras.layers.Concatenate()(\n [query_encoding, query_value_attention])\n\n # Add DNN layers, and create Model.\n # ...\n ```\n ", "arguments": [ { "name": "self", @@ -209,9 +214,14 @@ { "name": "training", "default": "None" + }, + { + "name": "return_attention_scores", + "default": "False", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/dense_attention.py", + "file": "keras/layers/dense_attention.py", "aliases": [] }, { @@ -236,13 +246,13 @@ 
"default": null } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": "keras/layers/merge.py", "aliases": [] }, { "name": "AveragePooling1D", "base": "Pooling1D", - "docstring": "Average pooling for temporal data.\n\n Arguments:\n pool_size: Integer, size of the average pooling windows.\n strides: Integer, or None. Factor by which to downscale.\n E.g. 2 will halve the input.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, steps)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.\n ", + "docstring": "Average pooling for temporal data.\n\n Downsamples the input representation by taking the average value over the\n window defined by `pool_size`. The window is shifted by `strides`. The\n resulting output when using \"valid\" padding option has a shape of:\n `output_shape = (input_shape - pool_size + 1) / strides)`\n\n The resulting output shape when using the \"same\" padding option is:\n `output_shape = input_shape / strides`\n\n For example, for strides=1 and padding=\"valid\":\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> x\n \n >>> avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2,\n ... 
strides=1, padding='valid')\n >>> avg_pool_1d(x)\n \n\n For example, for strides=2 and padding=\"valid\":\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> x\n \n >>> avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2,\n ... strides=2, padding='valid')\n >>> avg_pool_1d(x)\n \n\n For example, for strides=1 and padding=\"same\":\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> x\n \n >>> avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2,\n ... strides=1, padding='same')\n >>> avg_pool_1d(x)\n \n\n Args:\n pool_size: Integer, size of the average pooling windows.\n strides: Integer, or None. Factor by which to downscale.\n E.g. 2 will halve the input.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, steps)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.\n ", "arguments": [ { "name": "self", @@ -277,7 +287,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/pooling.py", + "file": "keras/layers/pooling.py", "aliases": [ "AvgPool1D" ] @@ -285,7 +295,7 @@ { "name": 
"AveragePooling2D", "base": "Pooling2D", - "docstring": "Average pooling operation for spatial data.\n\n Arguments:\n pool_size: integer or tuple of 2 integers,\n factors by which to downscale (vertical, horizontal).\n `(2, 2)` will halve the input in both spatial dimension.\n If only one integer is specified, the same window length\n will be used for both dimensions.\n strides: Integer, tuple of 2 integers, or None.\n Strides values.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.\n ", + "docstring": "Average pooling operation for spatial data.\n\n Downsamples the input along its spatial dimensions (height and width)\n by taking the average value over an input window\n (of size defined by `pool_size`) for each channel of the input.\n The window is shifted by `strides` along each dimension.\n\n The resulting output when using `\"valid\"` padding option has a shape\n (number of rows or columns) of:\n `output_shape = math.floor((input_shape - pool_size) / strides) + 1`\n (when 
`input_shape >= pool_size`)\n\n The resulting output shape when using the `\"same\"` padding option is:\n `output_shape = math.floor((input_shape - 1) / strides) + 1`\n\n For example, for `strides=(1, 1)` and `padding=\"valid\"`:\n\n >>> x = tf.constant([[1., 2., 3.],\n ... [4., 5., 6.],\n ... [7., 8., 9.]])\n >>> x = tf.reshape(x, [1, 3, 3, 1])\n >>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),\n ... strides=(1, 1), padding='valid')\n >>> avg_pool_2d(x)\n \n\n For example, for `stride=(2, 2)` and `padding=\"valid\"`:\n\n >>> x = tf.constant([[1., 2., 3., 4.],\n ... [5., 6., 7., 8.],\n ... [9., 10., 11., 12.]])\n >>> x = tf.reshape(x, [1, 3, 4, 1])\n >>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),\n ... strides=(2, 2), padding='valid')\n >>> avg_pool_2d(x)\n \n\n For example, for `strides=(1, 1)` and `padding=\"same\"`:\n\n >>> x = tf.constant([[1., 2., 3.],\n ... [4., 5., 6.],\n ... [7., 8., 9.]])\n >>> x = tf.reshape(x, [1, 3, 3, 1])\n >>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),\n ... strides=(1, 1), padding='same')\n >>> avg_pool_2d(x)\n \n\n Args:\n pool_size: integer or tuple of 2 integers,\n factors by which to downscale (vertical, horizontal).\n `(2, 2)` will halve the input in both spatial dimension.\n If only one integer is specified, the same window length\n will be used for both dimensions.\n strides: Integer, tuple of 2 integers, or None.\n Strides values.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.\n ", "arguments": [ { "name": "self", @@ -323,7 +333,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/pooling.py", + "file": "keras/layers/pooling.py", "aliases": [ "AvgPool2D" ] @@ -331,7 +341,7 @@ { "name": "AveragePooling3D", "base": "Pooling3D", - "docstring": "Average pooling operation for 3D data (spatial or spatio-temporal).\n\n Arguments:\n pool_size: tuple of 3 integers,\n factors by which to downscale (dim1, dim2, dim3).\n `(2, 2, 2)` will halve the size of the 3D input in each dimension.\n strides: tuple of 3 integers, or None. 
Strides values.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`\n ", + "docstring": "Average pooling operation for 3D data (spatial or spatio-temporal).\n\n Downsamples the input along its spatial dimensions (depth, height, and width)\n by taking the average value over an input window\n (of size defined by `pool_size`) for each channel of the input.\n The window is shifted by `strides` along each dimension.\n\n Args:\n pool_size: tuple of 3 integers,\n factors by which to downscale (dim1, dim2, dim3).\n `(2, 2, 2)` will halve the size of the 3D input in each dimension.\n strides: tuple of 3 integers, or None. Strides values.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`\n\n Example:\n\n ```python\n depth = 30\n height = 30\n width = 30\n input_channels = 3\n\n inputs = tf.keras.Input(shape=(depth, height, width, input_channels))\n layer = tf.keras.layers.AveragePooling3D(pool_size=3)\n outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)\n ```\n ", "arguments": [ { "name": "self", @@ -370,7 +380,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/pooling.py", + "file": "keras/layers/pooling.py", "aliases": [ "AvgPool3D" ] @@ -378,7 +388,7 @@ { "name": "BatchNormalization", "base": "BatchNormalizationBase", - "docstring": "Normalize and scale inputs or activations.\n\n Normalize the activations of the previous layer at each batch,\n i.e. 
applies a transformation that maintains the mean activation\n close to 0 and the activation standard deviation close to 1.\n\n Batch normalization differs from other layers in several key aspects:\n\n 1) Adding BatchNormalization with `training=True` to a model causes the\n result of one example to depend on the contents of all other examples in a\n minibatch. Be careful when padding batches or masking examples, as these can\n change the minibatch statistics and affect other examples.\n\n 2) Updates to the weights (moving statistics) are based on the forward pass\n of a model rather than the result of gradient computations.\n\n 3) When performing inference using a model containing batch normalization, it\n is generally (though not always) desirable to use accumulated statistics\n rather than mini-batch statistics. This is accomplished by passing\n `training=False` when calling the model, or using `model.predict`.\n\n Arguments:\n axis: Integer, the axis that should be normalized (typically the features\n axis). For instance, after a `Conv2D` layer with\n `data_format=\"channels_first\"`, set `axis=1` in `BatchNormalization`.\n momentum: Momentum for the moving average.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the\n next layer is linear (also e.g. 
`nn.relu`), this can be disabled since the\n scaling will be done by the next layer.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n moving_mean_initializer: Initializer for the moving mean.\n moving_variance_initializer: Initializer for the moving variance.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n beta_constraint: Optional constraint for the beta weight.\n gamma_constraint: Optional constraint for the gamma weight.\n renorm: Whether to use [Batch Renormalization](\n https://arxiv.org/abs/1702.03275). This adds extra variables during\n training. The inference is the same for either value of this parameter.\n renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to\n scalar `Tensors` used to clip the renorm correction. The correction `(r,\n d)` is used as `corrected_value = normalized_value * r + d`, with `r`\n clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,\n dmax are set to inf, 0, inf, respectively.\n renorm_momentum: Momentum used to update the moving means and standard\n deviations with renorm. Unlike `momentum`, this affects training and\n should be neither too small (which would add noise) nor too large (which\n would give stale estimates). Note that `momentum` is still applied to get\n the means and variances for inference.\n fused: if `True`, use a faster, fused implementation, or raise a ValueError\n if the fused implementation cannot be used. If `None`, use the faster\n implementation if possible. If False, do not used the fused\n implementation.\n trainable: Boolean, if `True` the variables will be marked as trainable.\n virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,\n which means batch normalization is performed across the whole batch. 
When\n `virtual_batch_size` is not `None`, instead perform \"Ghost Batch\n Normalization\", which creates virtual sub-batches which are each\n normalized separately (with shared gamma, beta, and moving statistics).\n Must divide the actual batch size during execution.\n adjustment: A function taking the `Tensor` containing the (dynamic) shape of\n the input tensor and returning a pair (scale, bias) to apply to the\n normalized values (before gamma and beta), only during training. For\n example, if axis==-1,\n `adjustment = lambda shape: (\n tf.random.uniform(shape[-1:], 0.93, 1.07),\n tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized\n value by up to 7% up or down, then shift the result by up to 0.1\n (with independent scaling and bias for each feature but shared\n across all examples), and finally apply gamma and/or beta. If\n `None`, no adjustment is applied. Cannot be specified if\n virtual_batch_size is specified.\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode.\n - `training=True`: The layer will normalize its inputs using the mean and\n variance of the current batch of inputs.\n - `training=False`: The layer will normalize its inputs using the mean and\n variance of its moving statistics, learned during training.\n Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n Output shape: Same shape as input. \n **About setting `layer.trainable = False` on a `BatchNormalization layer:**\n\n The meaning of setting `layer.trainable = False` is to freeze the layer,\n i.e. 
its internal state will not change during training:\n its trainable weights will not be updated\n during `fit()` or `train_on_batch()`, and its state updates will not be run.\n\n Usually, this does not necessarily mean that the layer is run in inference\n mode (which is normally controlled by the `training` argument that can\n be passed when calling a layer). \"Frozen state\" and \"inference mode\"\n are two separate concepts.\n\n However, in the case of the `BatchNormalization` layer, **setting\n `trainable = False` on the layer means that the layer will be\n subsequently run in inference mode** (meaning that it will use\n the moving mean and the moving variance to normalize the current batch,\n rather than using the mean and variance of the current batch).\n\n This behavior has been introduced in TensorFlow 2.0, in order\n to enable `layer.trainable = False` to produce the most commonly\n expected behavior in the convnet fine-tuning use case.\n\n Note that:\n - This behavior only occurs as of TensorFlow 2.0. In 1.*,\n setting `layer.trainable = False` would freeze the layer but would\n not switch it to inference mode.\n - Setting `trainable` on an model containing other layers will\n recursively set the `trainable` value of all inner layers.\n - If the value of the `trainable`\n attribute is changed after calling `compile()` on a model,\n the new value doesn't take effect for this model\n until `compile()` is called again.\n \n Normalization equations: Consider the intermediate activations \\(x\\) of a\n mini-batch of size\n \\\\(m\\\\): We can compute the mean and variance of the batch \\\\({\\mu_B} =\n \\frac{1}{m} \\sum_{i=1}^{m} {x_i}\\\\) \\\\({\\sigma_B^2} = \\frac{1}{m}\n \\sum_{i=1}^{m} ({x_i} - {\\mu_B})^2\\\\) and then compute a normalized\n \\\\(x\\\\), including a small factor \\\\({\\epsilon}\\\\) for numerical\n stability. 
\\\\(\\hat{x_i} = \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_B^2 +\n \\epsilon}}\\\\) And finally \\\\(\\hat{x}\\) is linearly transformed by\n \\({\\gamma}\\\\)\n and \\\\({\\beta}\\\\), which are learned parameters: \\\\({y_i} = {\\gamma *\n \\hat{x_i} + \\beta}\\\\)\n Reference:\n - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).\n ", + "docstring": "Layer that normalizes its inputs.\n\n Batch normalization applies a transformation that maintains the mean output\n close to 0 and the output standard deviation close to 1.\n\n Importantly, batch normalization works differently during training and\n during inference.\n\n **During training** (i.e. when using `fit()` or when calling the layer/model\n with the argument `training=True`), the layer normalizes its output using\n the mean and standard deviation of the current batch of inputs. That is to\n say, for each channel being normalized, the layer returns\n `gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:\n\n - `epsilon` is small constant (configurable as part of the constructor\n arguments)\n - `gamma` is a learned scaling factor (initialized as 1), which\n can be disabled by passing `scale=False` to the constructor.\n - `beta` is a learned offset factor (initialized as 0), which\n can be disabled by passing `center=False` to the constructor.\n\n **During inference** (i.e. when using `evaluate()` or `predict()` or when\n calling the layer/model with the argument `training=False` (which is the\n default), the layer normalizes its output using a moving average of the\n mean and standard deviation of the batches it has seen during training. 
That\n is to say, it returns\n `gamma * (batch - self.moving_mean) / sqrt(self.moving_var + epsilon) + beta`.\n\n `self.moving_mean` and `self.moving_var` are non-trainable variables that\n are updated each time the layer in called in training mode, as such:\n\n - `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`\n - `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`\n\n As such, the layer will only normalize its inputs during inference\n *after having been trained on data that has similar statistics as the\n inference data*.\n\n Args:\n axis: Integer, the axis that should be normalized (typically the features\n axis). For instance, after a `Conv2D` layer with\n `data_format=\"channels_first\"`, set `axis=1` in `BatchNormalization`.\n momentum: Momentum for the moving average.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the\n next layer is linear (also e.g. 
`nn.relu`), this can be disabled since the\n scaling will be done by the next layer.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n moving_mean_initializer: Initializer for the moving mean.\n moving_variance_initializer: Initializer for the moving variance.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n beta_constraint: Optional constraint for the beta weight.\n gamma_constraint: Optional constraint for the gamma weight.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode.\n - `training=True`: The layer will normalize its inputs using the mean and\n variance of the current batch of inputs.\n - `training=False`: The layer will normalize its inputs using the mean and\n variance of its moving statistics, learned during training.\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape` (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n\n Output shape:\n Same shape as input.\n\n Reference:\n - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).\n\n **About setting `layer.trainable = False` on a `BatchNormalization` layer:**\n\n The meaning of setting `layer.trainable = False` is to freeze the layer,\n i.e. its internal state will not change during training:\n its trainable weights will not be updated\n during `fit()` or `train_on_batch()`, and its state updates will not be run.\n\n Usually, this does not necessarily mean that the layer is run in inference\n mode (which is normally controlled by the `training` argument that can\n be passed when calling a layer). 
\"Frozen state\" and \"inference mode\"\n are two separate concepts.\n\n However, in the case of the `BatchNormalization` layer, **setting\n `trainable = False` on the layer means that the layer will be\n subsequently run in inference mode** (meaning that it will use\n the moving mean and the moving variance to normalize the current batch,\n rather than using the mean and variance of the current batch).\n\n This behavior has been introduced in TensorFlow 2.0, in order\n to enable `layer.trainable = False` to produce the most commonly\n expected behavior in the convnet fine-tuning use case.\n\n Note that:\n - Setting `trainable` on an model containing other layers will\n recursively set the `trainable` value of all inner layers.\n - If the value of the `trainable`\n attribute is changed after calling `compile()` on a model,\n the new value doesn't take effect for this model\n until `compile()` is called again.\n ", "arguments": [ { "name": "self", @@ -437,40 +447,6 @@ { "name": "gamma_constraint", "default": "None" - }, - { - "name": "renorm", - "default": "False", - "type": "boolean" - }, - { - "name": "renorm_clipping", - "default": "None" - }, - { - "name": "renorm_momentum", - "default": 0.99 - }, - { - "name": "fused", - "default": "None" - }, - { - "name": "trainable", - "default": "True", - "type": "boolean" - }, - { - "name": "virtual_batch_size", - "default": "None" - }, - { - "name": "adjustment", - "default": "None" - }, - { - "name": "name", - "default": "None" } ], "abstract": false, @@ -489,13 +465,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/normalization_v2.py", + "file": "keras/layers/normalization/batch_normalization.py", "aliases": [] }, { "name": "Bidirectional", "base": "Wrapper", - "docstring": "Bidirectional wrapper for RNNs.\n\n Arguments:\n layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or\n `keras.layers.GRU`. 
It could also be a `keras.layers.Layer` instance\n that meets the following criteria:\n 1. Be a sequence-processing layer (accepts 3D+ inputs).\n 2. Have a `go_backwards`, `return_sequences` and `return_state`\n attribute (with the same semantics as for the `RNN` class).\n 3. Have an `input_spec` attribute.\n 4. Implement serialization via `get_config()` and `from_config()`.\n Note that the recommended way to create new RNN layers is to write a\n custom RNN cell and use it with `keras.layers.RNN`, instead of\n subclassing `keras.layers.Layer` directly.\n merge_mode: Mode by which outputs of the forward and backward RNNs will be\n combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the\n outputs will not be combined, they will be returned as a list. Default\n value is 'concat'.\n backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer`\n instance to be used to handle backwards input processing.\n If `backward_layer` is not provided, the layer instance passed as the\n `layer` argument will be used to generate the backward layer\n automatically.\n Note that the provided `backward_layer` layer should have properties\n matching those of the `layer` argument, in particular it should have the\n same values for `stateful`, `return_states`, `return_sequence`, etc.\n In addition, `backward_layer` and `layer` should have different\n `go_backwards` argument values.\n A `ValueError` will be raised if these requirements are not met.\n\n Call arguments:\n The call arguments for this layer are the same as those of the wrapped RNN\n layer.\n Beware that when passing the `initial_state` argument during the call of\n this layer, the first half in the list of elements in the `initial_state`\n list will be passed to the forward RNN call and the last half in the list\n of elements will be passed to the backward RNN call.\n\n Raises:\n ValueError:\n 1. If `layer` or `backward_layer` is not a `Layer` instance.\n 2. In case of invalid `merge_mode` argument.\n 3. 
If `backward_layer` has mismatched properties compared to `layer`.\n\n Examples:\n\n ```python\n model = Sequential()\n model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))\n model.add(Bidirectional(LSTM(10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n # With custom backward layer\n model = Sequential()\n forward_layer = LSTM(10, return_sequences=True)\n backward_layer = LSTM(10, activation='relu', return_sequences=True,\n go_backwards=True)\n model.add(Bidirectional(forward_layer, backward_layer=backward_layer,\n input_shape=(5, 10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n ```\n ", + "docstring": "Bidirectional wrapper for RNNs.\n\n Args:\n layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or\n `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance\n that meets the following criteria:\n 1. Be a sequence-processing layer (accepts 3D+ inputs).\n 2. Have a `go_backwards`, `return_sequences` and `return_state`\n attribute (with the same semantics as for the `RNN` class).\n 3. Have an `input_spec` attribute.\n 4. Implement serialization via `get_config()` and `from_config()`.\n Note that the recommended way to create new RNN layers is to write a\n custom RNN cell and use it with `keras.layers.RNN`, instead of\n subclassing `keras.layers.Layer` directly.\n - When the `returns_sequences` is true, the output of the masked timestep\n will be zero regardless of the layer's original `zero_output_for_mask`\n value.\n merge_mode: Mode by which outputs of the forward and backward RNNs will be\n combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the\n outputs will not be combined, they will be returned as a list. 
Default\n value is 'concat'.\n backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer`\n instance to be used to handle backwards input processing.\n If `backward_layer` is not provided, the layer instance passed as the\n `layer` argument will be used to generate the backward layer\n automatically.\n Note that the provided `backward_layer` layer should have properties\n matching those of the `layer` argument, in particular it should have the\n same values for `stateful`, `return_states`, `return_sequences`, etc.\n In addition, `backward_layer` and `layer` should have different\n `go_backwards` argument values.\n A `ValueError` will be raised if these requirements are not met.\n\n Call arguments:\n The call arguments for this layer are the same as those of the wrapped RNN\n layer.\n Beware that when passing the `initial_state` argument during the call of\n this layer, the first half in the list of elements in the `initial_state`\n list will be passed to the forward RNN call and the last half in the list\n of elements will be passed to the backward RNN call.\n\n Raises:\n ValueError:\n 1. If `layer` or `backward_layer` is not a `Layer` instance.\n 2. In case of invalid `merge_mode` argument.\n 3. 
If `backward_layer` has mismatched properties compared to `layer`.\n\n Examples:\n\n ```python\n model = Sequential()\n model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))\n model.add(Bidirectional(LSTM(10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n # With custom backward layer\n model = Sequential()\n forward_layer = LSTM(10, return_sequences=True)\n backward_layer = LSTM(10, activation='relu', return_sequences=True,\n go_backwards=True)\n model.add(Bidirectional(forward_layer, backward_layer=backward_layer,\n input_shape=(5, 10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n ```\n ", "arguments": [ { "name": "self", @@ -538,7 +514,82 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/wrappers.py", + "file": "keras/layers/wrappers.py", + "aliases": [] + }, + { + "name": "CategoryEncoding", + "base": "Layer", + "docstring": "Category encoding layer.\n\n This layer provides options for condensing data into a categorical encoding\n when the total number of tokens are known in advance. It accepts integer\n values as inputs, and it outputs a dense representation of those\n inputs. For integer inputs where the total number of tokens is not known,\n use instead `tf.keras.layers.IntegerLookup`.\n\n Examples:\n\n **One-hot encoding data**\n\n >>> layer = tf.keras.layers.CategoryEncoding(\n ... num_tokens=4, output_mode=\"one_hot\")\n >>> layer([3, 2, 0, 1])\n \n\n **Multi-hot encoding data**\n\n >>> layer = tf.keras.layers.CategoryEncoding(\n ... num_tokens=4, output_mode=\"multi_hot\")\n >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])\n \n\n **Using weighted inputs in `\"count\"` mode**\n\n >>> layer = tf.keras.layers.CategoryEncoding(\n ... 
num_tokens=4, output_mode=\"count\")\n >>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])\n >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)\n \n\n Args:\n num_tokens: The total number of tokens the layer should support. All inputs\n to the layer must integers in the range `0 <= value < num_tokens`, or an\n error will be thrown.\n output_mode: Specification for the output of the layer.\n Defaults to `\"multi_hot\"`. Values can be `\"one_hot\"`, `\"multi_hot\"` or\n `\"count\"`, configuring the layer as follows:\n - `\"one_hot\"`: Encodes each individual element in the input into an\n array of `num_tokens` size, containing a 1 at the element index. If\n the last dimension is size 1, will encode on that dimension. If the\n last dimension is not size 1, will append a new dimension for the\n encoded output.\n - `\"multi_hot\"`: Encodes each sample in the input into a single array\n of `num_tokens` size, containing a 1 for each vocabulary term present\n in the sample. Treats the last dimension as the sample dimension, if\n input shape is `(..., sample_length)`, output shape will be\n `(..., num_tokens)`.\n - `\"count\"`: Like `\"multi_hot\"`, but the int array contains a count of\n the number of times the token at that index appeared in the sample.\n For all output modes, currently only output up to rank 2 is supported.\n sparse: Boolean. If true, returns a `SparseTensor` instead of a dense\n `Tensor`. Defaults to `False`.\n\n Call arguments:\n inputs: A 1D or 2D tensor of integer inputs.\n count_weights: A tensor in the same shape as `inputs` indicating the\n weight for each sample value when summing up in `count` mode. 
Not used in\n `\"multi_hot\"` or `\"one_hot\"` modes.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "num_tokens", + "default": "None" + }, + { + "name": "output_mode", + "default": "multi_hot" + }, + { + "name": "sparse", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + }, + { + "name": "count_weights", + "default": "None" + } + ], + "file": "keras/layers/preprocessing/category_encoding.py", + "aliases": [] + }, + { + "name": "CenterCrop", + "base": "Layer", + "docstring": "Crop the central portion of the images to target height and width.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "height", + "default": null + }, + { + "name": "width", + "default": null + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { @@ -567,13 +618,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": "keras/layers/merge.py", "aliases": [] }, { "name": "Conv1D", "base": "Conv", - "docstring": "1D convolution layer (e.g. 
temporal convolution).\n\n This layer creates a convolution kernel that is convolved\n with the layer input over a single spatial (or temporal) dimension\n to produce a tensor of outputs.\n If `use_bias` is True, a bias vector is created and added to the outputs.\n Finally, if `activation` is not `None`,\n it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide an `input_shape` argument\n (tuple of integers or `None`, e.g.\n `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,\n or `(None, 128)` for variable-length sequences of 128-dimensional vectors.\n\n Examples:\n\n >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size\n >>> # is 4.\n >>> input_shape = (4, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n ... 32, 3, activation='relu',input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 8, 32)\n\n >>> # With extended batch shape [4, 7] (e.g. weather data where batch\n >>> # dimensions correspond to spatial location and the third dimension\n >>> # corresponds to time.)\n >>> input_shape = (4, 7, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n ... 32, 3, activation='relu', input_shape=input_shape[2:])(x)\n >>> print(y.shape)\n (4, 7, 8, 32)\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"causal\"` or `\"same\"` (case-insensitive).\n `\"causal\"` results in causal (dilated) convolutions, e.g. `output[t]`\n does not depend on `input[t+1:]`. 
Useful when modeling temporal data\n where the model should not violate the temporal order.\n See [WaveNet: A Generative Model for Raw Audio, section\n 2.1](https://arxiv.org/abs/1609.03499).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n dilation_rate: an integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n groups: A positive integer specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters / groups` filters. The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 3+D tensor with shape: `batch_shape + (steps, input_dim)`\n\n Output shape:\n 3+D tensor with shape: `batch_shape + (new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n\n Returns:\n A 
tensor of rank 3 representing\n `activation(conv1d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n ", + "docstring": "1D convolution layer (e.g. temporal convolution).\n\n This layer creates a convolution kernel that is convolved\n with the layer input over a single spatial (or temporal) dimension\n to produce a tensor of outputs.\n If `use_bias` is True, a bias vector is created and added to the outputs.\n Finally, if `activation` is not `None`,\n it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide an `input_shape` argument\n (tuple of integers or `None`, e.g.\n `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,\n or `(None, 128)` for variable-length sequences of 128-dimensional vectors.\n\n Examples:\n\n >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size\n >>> # is 4.\n >>> input_shape = (4, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n ... 32, 3, activation='relu',input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 8, 32)\n\n >>> # With extended batch shape [4, 7] (e.g. weather data where batch\n >>> # dimensions correspond to spatial location and the third dimension\n >>> # corresponds to time.)\n >>> input_shape = (4, 7, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n ... 32, 3, activation='relu', input_shape=input_shape[2:])(x)\n >>> print(y.shape)\n (4, 7, 8, 32)\n\n Args:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"` or `\"causal\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n `\"causal\"` results in causal (dilated) convolutions, e.g. `output[t]`\n does not depend on `input[t+1:]`. Useful when modeling temporal data\n where the model should not violate the temporal order.\n See [WaveNet: A Generative Model for Raw Audio, section\n 2.1](https://arxiv.org/abs/1609.03499).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n dilation_rate: an integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n groups: A positive integer specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters / groups` filters. The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`). 
Defaults to 'glorot_uniform'.\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`). Defaults to 'zeros'.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 3+D tensor with shape: `batch_shape + (steps, input_dim)`\n\n Output shape:\n 3+D tensor with shape: `batch_shape + (new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n\n Returns:\n A tensor of rank 3 representing\n `activation(conv1d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n ", "arguments": [ { "name": "self", @@ -657,7 +708,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "Convolution1D" ] @@ -665,7 +716,7 @@ { "name": "Conv1DTranspose", "base": "Conv1D", - "docstring": "Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. 
`input_shape=(128, 3)` for data with 128 time steps and 3 channels.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer length of the 1D convolution window.\n strides: An integer specifying the stride of the convolution along the\n time dimension. Specifying a stride value != 1 is incompatible with\n specifying a `dilation_rate` value != 1. Defaults to 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n output_padding: An integer specifying the amount of padding along\n the time dimension of the output tensor.\n The amount of output padding must be lower than the stride.\n If set to `None` (default), the output shape is inferred.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: an integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying a `dilation_rate` value != 1 is\n incompatible with specifying a stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: 
Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 3D tensor with shape:\n `(batch_size, steps, channels)`\n\n Output shape:\n 3D tensor with shape:\n `(batch_size, new_steps, filters)`\n If `output_padding` is specified:\n ```\n new_timesteps = ((timesteps - 1) * strides + kernel_size -\n 2 * padding + output_padding)\n ```\n\n Returns:\n A tensor of rank 3 representing\n `activation(conv1dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep learning](\n https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional Networks](\n https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n ", + "docstring": "Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers or `None`, does not include the sample axis),\n e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels.\n\n Args:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer length of the 1D convolution window.\n strides: An integer specifying the stride of the convolution along the\n time dimension. Specifying a stride value != 1 is incompatible with\n specifying a `dilation_rate` value != 1. 
Defaults to 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n output_padding: An integer specifying the amount of padding along\n the time dimension of the output tensor.\n The amount of output padding must be lower than the stride.\n If set to `None` (default), the output shape is inferred.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: an integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying a `dilation_rate` value != 1 is\n incompatible with specifying a stride value != 1.\n Also dilation rate larger than 1 is not currently supported.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`). Defaults to 'glorot_uniform'.\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`). 
Defaults to 'zeros'.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 3D tensor with shape:\n `(batch_size, steps, channels)`\n\n Output shape:\n 3D tensor with shape:\n `(batch_size, new_steps, filters)`\n If `output_padding` is specified:\n ```\n new_timesteps = ((timesteps - 1) * strides + kernel_size -\n 2 * padding + output_padding)\n ```\n\n Returns:\n A tensor of rank 3 representing\n `activation(conv1dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep learning](\n https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional Networks](\n https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n ", "arguments": [ { "name": "self", @@ -749,7 +800,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "Convolution1DTranspose" ] @@ -757,7 +808,7 @@ { "name": "Conv2D", "base": "Conv", - "docstring": "2D convolution layer (e.g. spatial convolution over images).\n\n This layer creates a convolution kernel that is convolved\n with the layer input to produce a tensor of\n outputs. If `use_bias` is True,\n a bias vector is created and added to the outputs. 
Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\n in `data_format=\"channels_last\"`.\n\n Examples:\n\n >>> # The inputs are 28x28 RGB images with `channels_last` and the batch\n >>> # size is 4.\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 26, 26, 2)\n\n >>> # With `dilation_rate` as 2.\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 24, 24, 2)\n\n >>> # With `padding` as \"same\".\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', padding=\"same\", input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 28, 28, 2)\n\n >>> # With extended batch shape [4, 7]:\n >>> input_shape = (4, 7, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', input_shape=input_shape[2:])(x)\n >>> print(y.shape)\n (4, 7, 26, 26, 2)\n\n\n Arguments:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the height\n and width of the 2D convolution window. Can be a single integer to specify\n the same value for all spatial dimensions.\n strides: An integer or tuple/list of 2 integers, specifying the strides of\n the convolution along the height and width. Can be a single integer to\n specify the same value for all spatial dimensions. 
Specifying any stride\n value != 1 is incompatible with specifying any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch_size, height, width, channels)` while\n `channels_first` corresponds to inputs with shape `(batch_size, channels,\n height, width)`. It defaults to the `image_data_format` value found in\n your Keras config file at `~/.keras/keras.json`. If you never set it, then\n it will be `channels_last`.\n dilation_rate: an integer or tuple/list of 2 integers, specifying the\n dilation rate to use for dilated convolution. Can be a single integer to\n specify the same value for all spatial dimensions. Currently, specifying\n any `dilation_rate` value != 1 is incompatible with specifying any stride\n value != 1.\n groups: A positive integer specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved separately\n with `filters / groups` filters. The output is the concatenation of all\n the `groups` results along the channel axis. Input channels and `filters`\n must both be divisible by `groups`.\n activation: Activation function to use. 
If you don't specify anything, no\n activation is applied (see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (see\n `keras.initializers`).\n bias_initializer: Initializer for the bias vector (see\n `keras.initializers`).\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (see\n `keras.regularizers`).\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (see\n `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (see\n `keras.constraints`).\n Input shape:\n 4+D tensor with shape: `batch_shape + (channels, rows, cols)` if\n `data_format='channels_first'`\n or 4+D tensor with shape: `batch_shape + (rows, cols, channels)` if\n `data_format='channels_last'`.\n Output shape:\n 4+D tensor with shape: `batch_shape + (filters, new_rows, new_cols)` if\n `data_format='channels_first'` or 4+D tensor with shape: `batch_shape +\n (new_rows, new_cols, filters)` if `data_format='channels_last'`. `rows`\n and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4+ representing\n `activation(conv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is `\"causal\"`.\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n ", + "docstring": "2D convolution layer (e.g. spatial convolution over images).\n\n This layer creates a convolution kernel that is convolved\n with the layer input to produce a tensor of\n outputs. If `use_bias` is True,\n a bias vector is created and added to the outputs. 
Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers or `None`, does not include the sample axis),\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\n in `data_format=\"channels_last\"`. You can use `None` when\n a dimension has variable size.\n\n Examples:\n\n >>> # The inputs are 28x28 RGB images with `channels_last` and the batch\n >>> # size is 4.\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 26, 26, 2)\n\n >>> # With `dilation_rate` as 2.\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 24, 24, 2)\n\n >>> # With `padding` as \"same\".\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', padding=\"same\", input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 28, 28, 2)\n\n >>> # With extended batch shape [4, 7]:\n >>> input_shape = (4, 7, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', input_shape=input_shape[2:])(x)\n >>> print(y.shape)\n (4, 7, 26, 26, 2)\n\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the height\n and width of the 2D convolution window. Can be a single integer to specify\n the same value for all spatial dimensions.\n strides: An integer or tuple/list of 2 integers, specifying the strides of\n the convolution along the height and width. 
Can be a single integer to\n specify the same value for all spatial dimensions. Specifying any stride\n value != 1 is incompatible with specifying any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch_size, height, width, channels)` while\n `channels_first` corresponds to inputs with shape `(batch_size, channels,\n height, width)`. It defaults to the `image_data_format` value found in\n your Keras config file at `~/.keras/keras.json`. If you never set it, then\n it will be `channels_last`.\n dilation_rate: an integer or tuple/list of 2 integers, specifying the\n dilation rate to use for dilated convolution. Can be a single integer to\n specify the same value for all spatial dimensions. Currently, specifying\n any `dilation_rate` value != 1 is incompatible with specifying any stride\n value != 1.\n groups: A positive integer specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved separately\n with `filters / groups` filters. The output is the concatenation of all\n the `groups` results along the channel axis. Input channels and `filters`\n must both be divisible by `groups`.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied (see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (see\n `keras.initializers`). Defaults to 'glorot_uniform'.\n bias_initializer: Initializer for the bias vector (see\n `keras.initializers`). 
Defaults to 'zeros'.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix (see `keras.regularizers`). \n bias_regularizer: Regularizer function applied to the bias vector (see\n `keras.regularizers`). \n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (see\n `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (see\n `keras.constraints`).\n Input shape:\n 4+D tensor with shape: `batch_shape + (channels, rows, cols)` if\n `data_format='channels_first'`\n or 4+D tensor with shape: `batch_shape + (rows, cols, channels)` if\n `data_format='channels_last'`.\n Output shape:\n 4+D tensor with shape: `batch_shape + (filters, new_rows, new_cols)` if\n `data_format='channels_first'` or 4+D tensor with shape: `batch_shape +\n (new_rows, new_cols, filters)` if `data_format='channels_last'`. 
`rows`\n and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4+ representing\n `activation(conv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is `\"causal\"`.\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n ", "arguments": [ { "name": "self", @@ -847,7 +898,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "Convolution2D" ] @@ -855,7 +906,7 @@ { "name": "Conv2DTranspose", "base": "Conv2D", - "docstring": "Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\n in `data_format=\"channels_last\"`.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n output_padding: An integer or tuple/list of 2 integers,\n specifying the amount of padding along the height and width\n of the output tensor.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a 
bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n If `output_padding` is specified:\n ```\n new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +\n output_padding[0])\n new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +\n output_padding[1])\n ```\n\n Returns:\n A tensor of rank 4 representing\n `activation(conv2dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep\n learning](https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional\n Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n ", + "docstring": "Transposed convolution layer (sometimes called Deconvolution).\n\n The need 
for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers or `None`, does not include the sample axis),\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\n in `data_format=\"channels_last\"`.\n\n Args:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n output_padding: An integer or tuple/list of 2 integers,\n specifying the amount of padding along the height and width\n of the output tensor.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`). Defaults to 'glorot_uniform'.\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`). 
Defaults to 'zeros'.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n If `output_padding` is specified:\n ```\n new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +\n output_padding[0])\n new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +\n output_padding[1])\n ```\n\n Returns:\n A tensor of rank 4 representing\n `activation(conv2dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep\n learning](https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional\n Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n ", "arguments": [ { "name": "self", @@ -945,7 +996,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "Convolution2DTranspose" ] @@ -953,7 +1004,7 @@ { "name": "Conv3D", 
"base": "Conv", - "docstring": "3D convolution layer (e.g. spatial convolution over volumes).\n\n This layer creates a convolution kernel that is convolved\n with the layer input to produce a tensor of\n outputs. If `use_bias` is True,\n a bias vector is created and added to the outputs. Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes\n with a single channel,\n in `data_format=\"channels_last\"`.\n\n Examples:\n\n >>> # The inputs are 28x28x28 volumes with a single channel, and the\n >>> # batch size is 4\n >>> input_shape =(4, 28, 28, 28, 1)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv3D(\n ... 2, 3, activation='relu', input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 26, 26, 26, 2)\n\n >>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of 3D frames,\n >>> # with 7 frames per video.\n >>> input_shape = (4, 7, 28, 28, 28, 1)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv3D(\n ... 2, 3, activation='relu', input_shape=input_shape[2:])(x)\n >>> print(y.shape)\n (4, 7, 26, 26, 26, 2)\n\n Arguments:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the depth,\n height and width of the 3D convolution window. Can be a single integer to\n specify the same value for all spatial dimensions.\n strides: An integer or tuple/list of 3 integers, specifying the strides of\n the convolution along each spatial dimension. Can be a single integer to\n specify the same value for all spatial dimensions. 
Specifying any stride\n value != 1 is incompatible with specifying any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `batch_shape + (spatial_dim1, spatial_dim2,\n spatial_dim3, channels)` while `channels_first` corresponds to inputs with\n shape `batch_shape + (channels, spatial_dim1, spatial_dim2,\n spatial_dim3)`. It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`. If you never set it, then it\n will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 3 integers, specifying the\n dilation rate to use for dilated convolution. Can be a single integer to\n specify the same value for all spatial dimensions. Currently, specifying\n any `dilation_rate` value != 1 is incompatible with specifying any stride\n value != 1.\n groups: A positive integer specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved separately\n with `filters / groups` filters. The output is the concatenation of all\n the `groups` results along the channel axis. Input channels and `filters`\n must both be divisible by `groups`.\n activation: Activation function to use. 
If you don't specify anything, no\n activation is applied (see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (see\n `keras.initializers`).\n bias_initializer: Initializer for the bias vector (see\n `keras.initializers`).\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (see\n `keras.regularizers`).\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (see\n `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (see\n `keras.constraints`).\n Input shape:\n 5+D tensor with shape: `batch_shape + (channels, conv_dim1, conv_dim2,\n conv_dim3)` if data_format='channels_first'\n or 5+D tensor with shape: `batch_shape + (conv_dim1, conv_dim2, conv_dim3,\n channels)` if data_format='channels_last'.\n Output shape:\n 5+D tensor with shape: `batch_shape + (filters, new_conv_dim1,\n new_conv_dim2, new_conv_dim3)` if data_format='channels_first'\n or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2,\n new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`,\n `new_conv_dim2` and `new_conv_dim3` values might have changed due to\n padding.\n\n Returns:\n A tensor of rank 5+ representing\n `activation(conv3d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n ", + "docstring": "3D convolution layer (e.g. spatial convolution over volumes).\n\n This layer creates a convolution kernel that is convolved\n with the layer input to produce a tensor of\n outputs. If `use_bias` is True,\n a bias vector is created and added to the outputs. 
Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers or `None`, does not include the sample axis),\n e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes\n with a single channel,\n in `data_format=\"channels_last\"`.\n\n Examples:\n\n >>> # The inputs are 28x28x28 volumes with a single channel, and the\n >>> # batch size is 4\n >>> input_shape =(4, 28, 28, 28, 1)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv3D(\n ... 2, 3, activation='relu', input_shape=input_shape[1:])(x)\n >>> print(y.shape)\n (4, 26, 26, 26, 2)\n\n >>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of 3D frames,\n >>> # with 7 frames per video.\n >>> input_shape = (4, 7, 28, 28, 28, 1)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv3D(\n ... 2, 3, activation='relu', input_shape=input_shape[2:])(x)\n >>> print(y.shape)\n (4, 7, 26, 26, 26, 2)\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the depth,\n height and width of the 3D convolution window. Can be a single integer to\n specify the same value for all spatial dimensions.\n strides: An integer or tuple/list of 3 integers, specifying the strides of\n the convolution along each spatial dimension. Can be a single integer to\n specify the same value for all spatial dimensions. Specifying any stride\n value != 1 is incompatible with specifying any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `batch_shape + (spatial_dim1, spatial_dim2,\n spatial_dim3, channels)` while `channels_first` corresponds to inputs with\n shape `batch_shape + (channels, spatial_dim1, spatial_dim2,\n spatial_dim3)`. It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`. If you never set it, then it\n will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 3 integers, specifying the\n dilation rate to use for dilated convolution. Can be a single integer to\n specify the same value for all spatial dimensions. Currently, specifying\n any `dilation_rate` value != 1 is incompatible with specifying any stride\n value != 1.\n groups: A positive integer specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved separately\n with `filters / groups` filters. The output is the concatenation of all\n the `groups` results along the channel axis. Input channels and `filters`\n must both be divisible by `groups`.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied (see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (see\n `keras.initializers`). Defaults to 'glorot_uniform'.\n bias_initializer: Initializer for the bias vector (see\n `keras.initializers`). 
Defaults to 'zeros'.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (see\n `keras.regularizers`).\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (see\n `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (see\n `keras.constraints`).\n Input shape:\n 5+D tensor with shape: `batch_shape + (channels, conv_dim1, conv_dim2,\n conv_dim3)` if data_format='channels_first'\n or 5+D tensor with shape: `batch_shape + (conv_dim1, conv_dim2, conv_dim3,\n channels)` if data_format='channels_last'.\n Output shape:\n 5+D tensor with shape: `batch_shape + (filters, new_conv_dim1,\n new_conv_dim2, new_conv_dim3)` if data_format='channels_first'\n or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2,\n new_conv_dim3, filters)` if data_format='channels_last'. 
`new_conv_dim1`,\n `new_conv_dim2` and `new_conv_dim3` values might have changed due to\n padding.\n\n Returns:\n A tensor of rank 5+ representing\n `activation(conv3d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n ", "arguments": [ { "name": "self", @@ -1045,7 +1096,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "Convolution3D" ] @@ -1053,7 +1104,7 @@ { "name": "Conv3DTranspose", "base": "Conv3D", - "docstring": "Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels\n if `data_format=\"channels_last\"`.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along the depth, height\n and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n output_padding: An integer or tuple/list of 3 integers,\n specifying the amount of padding along the depth, height, and\n width.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, depth, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether 
the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (\n see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 5D tensor with shape:\n `(batch_size, channels, depth, rows, cols)` if data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, depth, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 5D tensor with shape:\n `(batch_size, filters, new_depth, new_rows, new_cols)` if\n data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, new_depth, new_rows, new_cols, filters)` if\n data_format='channels_last'.\n `depth` and `rows` and `cols` values might have changed due to padding.\n If `output_padding` is specified::\n ```\n new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +\n output_padding[0])\n new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +\n output_padding[1])\n new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] +\n output_padding[2])\n ```\n\n Returns:\n A tensor of rank 5 representing\n `activation(conv3dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep\n learning](https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional\n 
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n ", + "docstring": "Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers or `None`, does not include the sample axis),\n e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels\n if `data_format=\"channels_last\"`.\n\n Args:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along the depth, height\n and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n output_padding: An integer or tuple/list of 3 integers,\n specifying the amount of padding along the depth, height, and\n width.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, depth, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`). Defaults to 'glorot_uniform'.\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`). 
Defaults to 'zeros'.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (\n see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 5D tensor with shape:\n `(batch_size, channels, depth, rows, cols)` if data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, depth, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 5D tensor with shape:\n `(batch_size, filters, new_depth, new_rows, new_cols)` if\n data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, new_depth, new_rows, new_cols, filters)` if\n data_format='channels_last'.\n `depth` and `rows` and `cols` values might have changed due to padding.\n If `output_padding` is specified::\n ```\n new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +\n output_padding[0])\n new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +\n output_padding[1])\n new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] +\n output_padding[2])\n ```\n\n Returns:\n A tensor of rank 5 representing\n `activation(conv3dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep\n learning](https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional\n Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n ", "arguments": [ { "name": "self", @@ -1145,15 +1196,15 @@ "default": null } ], - "file": 
"tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "Convolution3DTranspose" ] }, { - "name": "ConvLSTM2D", - "base": "ConvRNN2D", - "docstring": "Convolutional LSTM.\n\n It is similar to an LSTM layer, but the input transformations\n and recurrent transformations are both convolutional.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, time, ..., channels)`\n while `channels_first` corresponds to\n inputs with shape `(batch, time, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n By default hyperbolic tangent activation function is applied\n (`tanh(x)`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n 
weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. (default False)\n return_state: Boolean Whether to return the last state\n in addition to the output. (default False)\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n\n Call arguments:\n inputs: A 5D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. 
This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or `recurrent_dropout`\n are set.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n\n Input shape:\n - If data_format='channels_first'\n 5D tensor with shape:\n `(samples, time, channels, rows, cols)`\n - If data_format='channels_last'\n 5D tensor with shape:\n `(samples, time, rows, cols, channels)`\n\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)`\n if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)`\n if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n - If `return_sequences`: 5D tensor with shape:\n `(samples, timesteps, filters, new_rows, new_cols)`\n if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, timesteps, new_rows, new_cols, filters)`\n if data_format='channels_last'.\n - Else, 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)`\n if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)`\n if data_format='channels_last'.\n\n Raises:\n ValueError: in case of invalid constructor arguments.\n\n References:\n - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)\n (the current implementation does not include the feedback loop on the\n cells output).\n ", + "name": "ConvLSTM1D", + "base": "ConvLSTM", + "docstring": "1D Convolutional LSTM.\n\n Similar to an LSTM layer, but the input transformations\n and recurrent transformations are both convolutional.\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. 
the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers, specifying the strides of\n the convolution. Specifying any stride value != 1 is incompatible with\n specifying any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive). `\"valid\"` means no\n padding. `\"same\"` results in padding evenly to the left/right or up/down\n of the input such that output has the same height/width dimension as the\n input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch, time, ..., channels)` while `channels_first`\n corresponds to inputs with shape `(batch, time, channels, ...)`. It\n defaults to the `image_data_format` value found in your Keras config file\n at `~/.keras/keras.json`. If you never set it, then it will be\n \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying the\n dilation rate to use for dilated convolution. Currently, specifying any\n `dilation_rate` value != 1 is incompatible with specifying any `strides`\n value != 1.\n activation: Activation function to use. By default hyperbolic tangent\n activation function is applied (`tanh(x)`).\n recurrent_activation: Activation function to use for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at\n initialization. 
Use in combination with `bias_initializer=\"zeros\"`. This\n is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output in the output\n sequence, or the full sequence. (default False)\n return_state: Boolean Whether to return the last state in addition to the\n output. (default False)\n go_backwards: Boolean (default False). If True, process the input sequence\n backwards.\n stateful: Boolean (default False). If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state.\n Call arguments:\n inputs: A 4D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether a\n given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. 
This is only relevant if `dropout` or `recurrent_dropout`\n are set.\n initial_state: List of initial state tensors to be passed to the first call\n of the cell.\n Input shape: - If data_format='channels_first'\n 4D tensor with shape: `(samples, time, channels, rows)` - If\n data_format='channels_last'\n 4D tensor with shape: `(samples, time, rows, channels)`\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is the output. The\n remaining tensors are the last states,\n each 3D tensor with shape: `(samples, filters, new_rows)` if\n data_format='channels_first'\n or shape: `(samples, new_rows, filters)` if data_format='channels_last'.\n `rows` values might have changed due to padding.\n - If `return_sequences`: 4D tensor with shape: `(samples, timesteps,\n filters, new_rows)` if data_format='channels_first'\n or shape: `(samples, timesteps, new_rows, filters)` if\n data_format='channels_last'.\n - Else, 3D tensor with shape: `(samples, filters, new_rows)` if\n data_format='channels_first'\n or shape: `(samples, new_rows, filters)` if data_format='channels_last'.\n\n Raises:\n ValueError: in case of invalid constructor arguments.\n\n References:\n - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)\n (the current implementation does not include the feedback loop on the\n cells output).\n ", "arguments": [ { "name": "self", @@ -1169,10 +1220,7 @@ }, { "name": "strides", - "default": [ - 1, - 1 - ] + "default": 1 }, { "name": "padding", @@ -1184,10 +1232,7 @@ }, { "name": "dilation_rate", - "default": [ - 1, - 1 - ] + "default": 1 }, { "name": "activation", @@ -1300,145 +1345,55 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/convolutional_recurrent.py", + "file": "keras/layers/convolutional_recurrent.py", "aliases": [] }, { - "name": "Cropping1D", - "base": "Layer", - "docstring": "Cropping layer for 1D input (e.g. 
temporal sequence).\n\n It crops along the time dimension (axis 1).\n\n Examples:\n\n >>> input_shape = (2, 3, 2)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1]\n [ 2 3]\n [ 4 5]]\n [[ 6 7]\n [ 8 9]\n [10 11]]]\n >>> y = tf.keras.layers.Cropping1D(cropping=1)(x)\n >>> print(y)\n tf.Tensor(\n [[[2 3]]\n [[8 9]]], shape=(2, 1, 2), dtype=int64)\n\n Arguments:\n cropping: Int or tuple of int (length 2)\n How many units should be trimmed off at the beginning and end of\n the cropping dimension (axis 1).\n If a single int is provided, the same value will be used for both.\n\n Input shape:\n 3D tensor with shape `(batch_size, axis_to_crop, features)`\n\n Output shape:\n 3D tensor with shape `(batch_size, cropped_axis, features)`\n ", + "name": "ConvLSTM2D", + "base": "ConvLSTM", + "docstring": "2D Convolutional LSTM.\n\n Similar to an LSTM layer, but the input transformations\n and recurrent transformations are both convolutional.\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers, specifying the strides of\n the convolution. Specifying any stride value != 1 is incompatible with\n specifying any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive). `\"valid\"` means no\n padding. `\"same\"` results in padding evenly to the left/right or up/down\n of the input such that output has the same height/width dimension as the\n input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch, time, ..., channels)` while `channels_first`\n corresponds to inputs with shape `(batch, time, channels, ...)`. 
It\n defaults to the `image_data_format` value found in your Keras config file\n at `~/.keras/keras.json`. If you never set it, then it will be\n \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying the\n dilation rate to use for dilated convolution. Currently, specifying any\n `dilation_rate` value != 1 is incompatible with specifying any `strides`\n value != 1.\n activation: Activation function to use. By default hyperbolic tangent\n activation function is applied (`tanh(x)`).\n recurrent_activation: Activation function to use for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at\n initialization. Use in combination with `bias_initializer=\"zeros\"`. This\n is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output in the output\n sequence, or the full sequence. (default False)\n return_state: Boolean Whether to return the last state in addition to the\n output. 
(default False)\n go_backwards: Boolean (default False). If True, process the input sequence\n backwards.\n stateful: Boolean (default False). If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state.\n Call arguments:\n inputs: A 5D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether a\n given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or `recurrent_dropout`\n are set.\n initial_state: List of initial state tensors to be passed to the first call\n of the cell.\n Input shape: - If data_format='channels_first'\n 5D tensor with shape: `(samples, time, channels, rows, cols)` - If\n data_format='channels_last'\n 5D tensor with shape: `(samples, time, rows, cols, channels)`\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is the output. The\n remaining tensors are the last states,\n each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n data_format='channels_first'\n or shape: `(samples, new_rows, new_cols, filters)` if\n data_format='channels_last'. 
`rows` and `cols` values might have changed\n due to padding.\n - If `return_sequences`: 5D tensor with shape: `(samples, timesteps,\n filters, new_rows, new_cols)` if data_format='channels_first'\n or shape: `(samples, timesteps, new_rows, new_cols, filters)` if\n data_format='channels_last'.\n - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n data_format='channels_first'\n or shape: `(samples, new_rows, new_cols, filters)` if\n data_format='channels_last'.\n\n Raises:\n ValueError: in case of invalid constructor arguments.\n\n References:\n - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)\n (the current implementation does not include the feedback loop on the\n cells output).\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "cropping", - "default": [ - 1, - 1 - ] - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", + "name": "filters", "default": null }, { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/convolutional.py", - "aliases": [] - }, - { - "name": "Cropping2D", - "base": "Layer", - "docstring": "Cropping layer for 2D input (e.g. picture).\n\n It crops along spatial dimensions, i.e. 
height and width.\n\n Examples:\n\n >>> input_shape = (2, 28, 28, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)\n >>> print(y.shape)\n (2, 24, 20, 3)\n\n Arguments:\n cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric cropping values for height and width:\n `(symmetric_height_crop, symmetric_width_crop)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_crop, bottom_crop), (left_crop, right_crop))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, cropped_rows, cropped_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, cropped_rows, cropped_cols)`\n ", - "arguments": [ - { - "name": "self", + "name": "kernel_size", "default": null }, { - "name": "cropping", + "name": "strides", "default": [ - [ - 0, - 0 - ], - [ - 0, - 0 - ] + 1, + 1 ] }, { - "name": "data_format", - "default": "None" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - } - ], 
- "file": "tensorflow/python/keras/layers/convolutional.py", - "aliases": [] - }, - { - "name": "Cropping3D", - "base": "Layer", - "docstring": "Cropping layer for 3D data (e.g. spatial or spatio-temporal).\n\n Examples:\n\n >>> input_shape = (2, 28, 28, 10, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)\n >>> print(y.shape)\n (2, 24, 20, 6, 3)\n\n Arguments:\n cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to depth, height, and width.\n - If tuple of 3 ints: interpreted as two different\n symmetric cropping values for depth, height, and width:\n `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.\n - If tuple of 3 tuples of 2 ints: interpreted as\n `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,\n right_dim2_crop), (left_dim3_crop, right_dim3_crop))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_crop, second_axis_to_crop,\n third_axis_to_crop)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,\n depth)`\n - If `data_format` is 
`\"channels_first\"`:\n `(batch_size, depth, first_cropped_axis, second_cropped_axis,\n third_cropped_axis)`\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "cropping", - "default": [ - [ - 1, - 1 - ], - [ - 1, - 1 - ], - [ - 1, - 1 - ] - ] + "name": "padding", + "default": "valid" }, { "name": "data_format", "default": "None" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null }, { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/convolutional.py", - "aliases": [] - }, - { - "name": "Dense", - "base": "Layer", - "docstring": "Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: If the input to the layer has a rank greater than 2, then `Dense`\n computes the dot product between the `inputs` and the `kernel` along the\n last axis of the `inputs` and axis 1 of the `kernel` (using `tf.tensordot`).\n For example, if input has dimensions `(batch_size, d0, d1)`,\n then we create a `kernel` with shape `(d1, units)`, and the `kernel` operates\n along axis 2 of the `input`, on every sub-tensor of shape `(1, 1, d1)`\n (there are `batch_size * d0` such sub-tensors).\n The output in this case will have shape `(batch_size, d0, units)`.\n\n Besides, layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n\n Example:\n\n >>> # Create a `Sequential` model and add a Dense layer as the first layer.\n >>> model = tf.keras.models.Sequential()\n >>> model.add(tf.keras.Input(shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(32, activation='relu'))\n >>> # Now the model will 
take as input arrays of shape (None, 16)\n >>> # and output arrays of shape (None, 32).\n >>> # Note that after the first layer, you don't need to specify\n >>> # the size of the input anymore:\n >>> model.add(tf.keras.layers.Dense(32))\n >>> model.output_shape\n (None, 32)\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n ", - "arguments": [ - { - "name": "self", - "default": null + "name": "dilation_rate", + "default": [ + 1, + 1 + ] }, { - "name": "units", - "default": null + "name": "activation", + "default": "tanh" }, { - "name": "activation", - "default": "None" + "name": "recurrent_activation", + "default": "hard_sigmoid" }, { "name": "use_bias", @@ -1449,14 +1404,27 @@ "name": "kernel_initializer", "default": "glorot_uniform" }, + { + "name": "recurrent_initializer", + "default": "orthogonal" + }, { "name": "bias_initializer", "default": "zeros" }, + { + "name": 
"unit_forget_bias", + "default": "True", + "type": "boolean" + }, { "name": "kernel_regularizer", "default": "None" }, + { + "name": "recurrent_regularizer", + "default": "None" + }, { "name": "bias_regularizer", "default": "None" @@ -1470,46 +1438,40 @@ "default": "None" }, { - "name": "bias_constraint", + "name": "recurrent_constraint", "default": "None" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ + }, { - "name": "self", - "default": null + "name": "bias_constraint", + "default": "None" }, { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/core.py", - "aliases": [] - }, - { - "name": "DenseFeatures", - "base": "DenseFeatures", - "docstring": "A layer that produces a dense `Tensor` based on given `feature_columns`.\n\n Generally a single example in training data is described with FeatureColumns.\n At the first layer of the model, this column oriented data should be converted\n to a single `Tensor`.\n\n This layer can be called multiple times with different features.\n\n This is the V2 version of this layer that uses name_scopes to create\n variables instead of variable_scopes. But this approach currently lacks\n support for partitioned variables. 
In that case, use the V1 version instead.\n\n Example:\n\n ```python\n price = tf.feature_column.numeric_column('price')\n keywords_embedded = tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_hash_bucket(\"keywords\", 10K),\n dimensions=16)\n columns = [price, keywords_embedded, ...]\n feature_layer = tf.keras.layers.DenseFeatures(columns)\n\n features = tf.io.parse_example(\n ..., features=tf.feature_column.make_parse_example_spec(columns))\n dense_tensor = feature_layer(features)\n for units in [128, 64, 32]:\n dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)\n prediction = tf.keras.layers.Dense(1)(dense_tensor)\n ```\n ", - "arguments": [ + "name": "return_sequences", + "default": "False", + "type": "boolean" + }, { - "name": "self", - "default": null + "name": "return_state", + "default": "False", + "type": "boolean" }, { - "name": "feature_columns", - "default": null + "name": "go_backwards", + "default": "False", + "type": "boolean" }, { - "name": "trainable", - "default": "True", + "name": "stateful", + "default": "False", "type": "boolean" }, { - "name": "name", - "default": "None" + "name": "dropout", + "default": 0.0 + }, + { + "name": "recurrent_dropout", + "default": 0.0 } ], "abstract": false, @@ -1520,30 +1482,38 @@ "default": null }, { - "name": "features", + "name": "inputs", "default": null }, { - "name": "cols_to_output_tensors", + "name": "mask", "default": "None" }, { "name": "training", "default": "None" + }, + { + "name": "initial_state", + "default": "None" } ], - "file": "tensorflow/python/keras/feature_column/dense_features_v2.py", + "file": "keras/layers/convolutional_recurrent.py", "aliases": [] }, { - "name": "DepthwiseConv2D", - "base": "Conv2D", - "docstring": "Depthwise separable 2D convolution.\n\n Depthwise Separable convolutions consist of performing\n just the first step in a depthwise spatial convolution\n (which acts on each input channel separately).\n The 
`depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Arguments:\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `'valid'` or `'same'` (case-insensitive).\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be 'channels_last'.\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see 
`keras.initializers`).\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its 'activation') (\n see `keras.regularizers`).\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `[batch_size, channels, rows, cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch_size, rows, cols, channels]` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `[batch_size, filters, new_rows, new_cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch_size, new_rows, new_cols, filters]` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(depthwiseconv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n ", + "name": "ConvLSTM3D", + "base": "ConvLSTM", + "docstring": "3D Convolutional LSTM.\n\n Similar to an LSTM layer, but the input transformations\n and recurrent transformations are both convolutional.\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number of\n output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers, specifying the strides of\n the convolution. Specifying any stride value != 1 is incompatible with\n specifying any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive). 
`\"valid\"` means no\n padding. `\"same\"` results in padding evenly to the left/right or up/down\n of the input such that output has the same height/width dimension as the\n input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs. `channels_last` corresponds\n to inputs with shape `(batch, time, ..., channels)` while `channels_first`\n corresponds to inputs with shape `(batch, time, channels, ...)`. It\n defaults to the `image_data_format` value found in your Keras config file\n at `~/.keras/keras.json`. If you never set it, then it will be\n \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying the\n dilation rate to use for dilated convolution. Currently, specifying any\n `dilation_rate` value != 1 is incompatible with specifying any `strides`\n value != 1.\n activation: Activation function to use. By default hyperbolic tangent\n activation function is applied (`tanh(x)`).\n recurrent_activation: Activation function to use for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at\n initialization. Use in combination with `bias_initializer=\"zeros\"`. 
This\n is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output in the output\n sequence, or the full sequence. (default False)\n return_state: Boolean Whether to return the last state in addition to the\n output. (default False)\n go_backwards: Boolean (default False). If True, process the input sequence\n backwards.\n stateful: Boolean (default False). If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state.\n Call arguments:\n inputs: A 6D tensor.\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether a\n given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. 
This is only relevant if `dropout` or `recurrent_dropout`\n are set.\n initial_state: List of initial state tensors to be passed to the first call\n of the cell.\n Input shape: - If data_format='channels_first'\n 6D tensor with shape: `(samples, time, channels, rows, cols, depth)` -\n If data_format='channels_last'\n 5D tensor with shape: `(samples, time, rows, cols, depth, channels)`\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is the output. The\n remaining tensors are the last states,\n each 5D tensor with shape: `(samples, filters, new_rows, new_cols,\n new_depth)` if data_format='channels_first'\n or shape: `(samples, new_rows, new_cols, new_depth, filters)` if\n data_format='channels_last'. `rows`, `cols`, and `depth` values might\n have changed due to padding.\n - If `return_sequences`: 6D tensor with shape: `(samples, timesteps,\n filters, new_rows, new_cols, new_depth)` if data_format='channels_first'\n or shape: `(samples, timesteps, new_rows, new_cols, new_depth, filters)`\n if data_format='channels_last'.\n - Else, 5D tensor with shape: `(samples, filters, new_rows, new_cols,\n new_depth)` if data_format='channels_first'\n or shape: `(samples, new_rows, new_cols, new_depth, filters)` if\n data_format='channels_last'.\n\n Raises:\n ValueError: in case of invalid constructor arguments.\n\n References:\n - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)\n (the current implementation does not include the feedback loop on the\n cells output).\n ", "arguments": [ { "name": "self", "default": null }, + { + "name": "filters", + "default": null + }, { "name": "kernel_size", "default": null @@ -1551,6 +1521,7 @@ { "name": "strides", "default": [ + 1, 1, 1 ] @@ -1559,10 +1530,6 @@ "name": "padding", "default": "valid" }, - { - "name": "depth_multiplier", - "default": 1 - }, { "name": "data_format", "default": "None" @@ -1570,13 +1537,18 @@ { "name": "dilation_rate", "default": [ + 1, 1, 1 ] }, { "name": "activation", - 
"default": "None" + "default": "tanh" + }, + { + "name": "recurrent_activation", + "default": "hard_sigmoid" }, { "name": "use_bias", @@ -1584,15 +1556,28 @@ "type": "boolean" }, { - "name": "depthwise_initializer", + "name": "kernel_initializer", "default": "glorot_uniform" }, + { + "name": "recurrent_initializer", + "default": "orthogonal" + }, { "name": "bias_initializer", "default": "zeros" }, { - "name": "depthwise_regularizer", + "name": "unit_forget_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "recurrent_regularizer", "default": "None" }, { @@ -1604,12 +1589,44 @@ "default": "None" }, { - "name": "depthwise_constraint", + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "recurrent_constraint", "default": "None" }, { "name": "bias_constraint", "default": "None" + }, + { + "name": "return_sequences", + "default": "False", + "type": "boolean" + }, + { + "name": "return_state", + "default": "False", + "type": "boolean" + }, + { + "name": "go_backwards", + "default": "False", + "type": "boolean" + }, + { + "name": "stateful", + "default": "False", + "type": "boolean" + }, + { + "name": "dropout", + "default": 0.0 + }, + { + "name": "recurrent_dropout", + "default": 0.0 } ], "abstract": false, @@ -1622,28 +1639,38 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" + }, + { + "name": "training", + "default": "None" + }, + { + "name": "initial_state", + "default": "None" } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional_recurrent.py", "aliases": [] }, { - "name": "Dot", - "base": "_Merge", - "docstring": "Layer that computes a dot product between samples in two tensors.\n\n E.g. 
if applied to a list of two tensors `a` and `b` of shape\n `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`\n where each entry `i` will be the dot product between\n `a[i]` and `b[i]`.\n\n >>> x = np.arange(10).reshape(1, 5, 2)\n >>> print(x)\n [[[0 1]\n [2 3]\n [4 5]\n [6 7]\n [8 9]]]\n >>> y = np.arange(10, 20).reshape(1, 2, 5)\n >>> print(y)\n [[[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2])\n >>> dotted.shape\n TensorShape([5, 1])\n\n\n ", + "name": "Cropping1D", + "base": "Layer", + "docstring": "Cropping layer for 1D input (e.g. temporal sequence).\n\n It crops along the time dimension (axis 1).\n\n Examples:\n\n >>> input_shape = (2, 3, 2)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1]\n [ 2 3]\n [ 4 5]]\n [[ 6 7]\n [ 8 9]\n [10 11]]]\n >>> y = tf.keras.layers.Cropping1D(cropping=1)(x)\n >>> print(y)\n tf.Tensor(\n [[[2 3]]\n [[8 9]]], shape=(2, 1, 2), dtype=int64)\n\n Args:\n cropping: Int or tuple of int (length 2)\n How many units should be trimmed off at the beginning and end of\n the cropping dimension (axis 1).\n If a single int is provided, the same value will be used for both.\n\n Input shape:\n 3D tensor with shape `(batch_size, axis_to_crop, features)`\n\n Output shape:\n 3D tensor with shape `(batch_size, cropped_axis, features)`\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "axes", - "default": null - }, - { - "name": "normalize", - "default": "False", - "type": "boolean" + "name": "cropping", + "default": [ + 1, + 1 + ] } ], "abstract": false, @@ -1658,28 +1685,33 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { - "name": "Dropout", + "name": 
"Cropping2D", "base": "Layer", - "docstring": "Applies Dropout to the input.\n\n The Dropout layer randomly sets input units to 0 with a frequency of `rate`\n at each step during training time, which helps prevent overfitting.\n Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over\n all inputs is unchanged.\n\n Note that the Dropout layer only applies when `training` is set to True\n such that no values are dropped during inference. When using `model.fit`,\n `training` will be appropriately set to True automatically, and in other\n contexts, you can set the kwarg explicitly to True when calling the layer.\n\n (This is in contrast to setting `trainable=False` for a Dropout layer.\n `trainable` does not affect the layer's behavior, as Dropout does\n not have any variables/weights that can be frozen during training.)\n\n >>> tf.random.set_seed(0)\n >>> layer = tf.keras.layers.Dropout(.2, input_shape=(2,))\n >>> data = np.arange(10).reshape(5, 2).astype(np.float32)\n >>> print(data)\n [[0. 1.]\n [2. 3.]\n [4. 5.]\n [6. 7.]\n [8. 9.]]\n >>> outputs = layer(data, training=True)\n >>> print(outputs)\n tf.Tensor(\n [[ 0. 1.25]\n [ 2.5 3.75]\n [ 5. 6.25]\n [ 7.5 8.75]\n [10. 0. ]], shape=(5, 2), dtype=float32)\n\n Arguments:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n ", + "docstring": "Cropping layer for 2D input (e.g. 
picture).\n\n It crops along spatial dimensions, i.e. height and width.\n\n Examples:\n\n >>> input_shape = (2, 28, 28, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)\n >>> print(y.shape)\n (2, 24, 20, 3)\n\n Args:\n cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric cropping values for height and width:\n `(symmetric_height_crop, symmetric_width_crop)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_crop, bottom_crop), (left_crop, right_crop))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, cropped_rows, cropped_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, cropped_rows, cropped_cols)`\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "rate", - "default": null - }, - { - "name": "noise_shape", - "default": "None" + "name": "cropping", + "default": [ + [ + 0, + 0 + ], + [ + 0, + 0 + ] + ] }, { - "name": "seed", + "name": "data_format", "default": "None" } ], @@ -1693,27 +1725,40 @@ { "name": "inputs", "default": 
null - }, - { - "name": "training", - "default": "None" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { - "name": "ELU", + "name": "Cropping3D", "base": "Layer", - "docstring": "Exponential Linear Unit.\n\n It follows:\n\n ```\n f(x) = alpha * (exp(x) - 1.) for x < 0\n f(x) = x for x >= 0\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Arguments:\n alpha: Scale for the negative factor.\n ", + "docstring": "Cropping layer for 3D data (e.g. spatial or spatio-temporal).\n\n Examples:\n\n >>> input_shape = (2, 28, 28, 10, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)\n >>> print(y.shape)\n (2, 24, 20, 6, 3)\n\n Args:\n cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to depth, height, and width.\n - If tuple of 3 ints: interpreted as two different\n symmetric cropping values for depth, height, and width:\n `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.\n - If tuple of 3 tuples of 2 ints: interpreted as\n `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,\n right_dim2_crop), (left_dim3_crop, right_dim3_crop))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n 
Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_crop, second_axis_to_crop,\n third_axis_to_crop)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_cropped_axis, second_cropped_axis,\n third_cropped_axis)`\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "alpha", - "default": 1.0 + "name": "cropping", + "default": [ + [ + 1, + 1 + ], + [ + 1, + 1 + ], + [ + 1, + 1 + ] + ] + }, + { + "name": "data_format", + "default": "None" } ], "abstract": false, @@ -1728,49 +1773,57 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/advanced_activations.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { - "name": "Embedding", + "name": "Dense", "base": "Layer", - "docstring": "Turns positive integers (indexes) into dense vectors of fixed size.\n\n e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`\n\n This layer can only be used as the first layer in a model.\n\n Example:\n\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Embedding(1000, 64, input_length=10))\n >>> # The model will take as input an integer matrix of size (batch,\n >>> # input_length), and the largest integer (i.e. word index) in the input\n >>> # should be no larger than 999 (vocabulary size).\n >>> # Now model.output_shape is (None, 10, 64), where `None` is the batch\n >>> # dimension.\n >>> input_array = np.random.randint(1000, size=(32, 10))\n >>> model.compile('rmsprop', 'mse')\n >>> output_array = model.predict(input_array)\n >>> print(output_array.shape)\n (32, 10, 64)\n\n Arguments:\n input_dim: Integer. Size of the vocabulary,\n i.e. 
maximum integer index + 1.\n output_dim: Integer. Dimension of the dense embedding.\n embeddings_initializer: Initializer for the `embeddings`\n matrix (see `keras.initializers`).\n embeddings_regularizer: Regularizer function applied to\n the `embeddings` matrix (see `keras.regularizers`).\n embeddings_constraint: Constraint function applied to\n the `embeddings` matrix (see `keras.constraints`).\n mask_zero: Boolean, whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n This is useful when using recurrent layers\n which may take variable length input.\n If this is `True`, then all subsequent layers\n in the model need to support masking or an exception will be raised.\n If mask_zero is set to True, as a consequence, index 0 cannot be\n used in the vocabulary (input_dim should equal size of\n vocabulary + 1).\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n\n Input shape:\n 2D tensor with shape: `(batch_size, input_length)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, input_length, output_dim)`.\n ", + "docstring": "Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`). 
These are all attributes of\n `Dense`.\n\n Note: If the input to the layer has a rank greater than 2, then `Dense`\n computes the dot product between the `inputs` and the `kernel` along the\n last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).\n For example, if input has dimensions `(batch_size, d0, d1)`,\n then we create a `kernel` with shape `(d1, units)`, and the `kernel` operates\n along axis 2 of the `input`, on every sub-tensor of shape `(1, 1, d1)`\n (there are `batch_size * d0` such sub-tensors).\n The output in this case will have shape `(batch_size, d0, units)`.\n\n Besides, layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n When a popular kwarg `input_shape` is passed, then keras will create\n an input layer to insert before the current layer. This can be treated\n equivalent to explicitly defining an `InputLayer`.\n\n Example:\n\n >>> # Create a `Sequential` model and add a Dense layer as the first layer.\n >>> model = tf.keras.models.Sequential()\n >>> model.add(tf.keras.Input(shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(32, activation='relu'))\n >>> # Now the model will take as input arrays of shape (None, 16)\n >>> # and output arrays of shape (None, 32).\n >>> # Note that after the first layer, you don't need to specify\n >>> # the size of the input anymore:\n >>> model.add(tf.keras.layers.Dense(32))\n >>> model.output_shape\n (None, 32)\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "input_dim", + "name": "units", "default": null }, { - "name": "output_dim", - "default": null + "name": "activation", + "default": "None" }, { - "name": "embeddings_initializer", - "default": "uniform" + "name": "use_bias", + "default": "True", + "type": "boolean" }, { - "name": "embeddings_regularizer", + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "kernel_regularizer", "default": "None" }, { - "name": "activity_regularizer", + "name": "bias_regularizer", "default": "None" }, { - "name": "embeddings_constraint", + "name": "activity_regularizer", "default": "None" }, { - "name": "mask_zero", - "default": "False", - "type": "boolean" + "name": "kernel_constraint", + "default": "None" }, { - "name": "input_length", + "name": "bias_constraint", "default": "None" } ], @@ -1786,20 +1839,29 @@ "default": null } ], - "file": 
"tensorflow/python/keras/layers/embeddings.py", + "file": "keras/layers/core.py", "aliases": [] }, { - "name": "Flatten", - "base": "Layer", - "docstring": "Flattens the input. Does not affect the batch size.\n\n Note: If inputs are shaped `(batch,)` without a feature axis, then\n flattening adds an extra channel dimension and output shape is `(batch, 1)`.\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Example:\n\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Conv2D(64, 3, 3, input_shape=(3, 32, 32)))\n >>> model.output_shape\n (None, 1, 10, 64)\n\n >>> model.add(Flatten())\n >>> model.output_shape\n (None, 640)\n\n ", + "name": "DenseFeatures", + "base": "DenseFeatures", + "docstring": "A layer that produces a dense `Tensor` based on given `feature_columns`.\n\n Generally a single example in training data is described with FeatureColumns.\n At the first layer of the model, this column oriented data should be converted\n to a single `Tensor`.\n\n This layer can be called multiple times with different features.\n\n This is the V2 version of this layer that uses name_scopes to create\n variables instead of variable_scopes. But this approach currently lacks\n support for partitioned variables. 
In that case, use the V1 version instead.\n\n Example:\n\n ```python\n price = tf.feature_column.numeric_column('price')\n keywords_embedded = tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_hash_bucket(\"keywords\", 10K),\n dimensions=16)\n columns = [price, keywords_embedded, ...]\n feature_layer = tf.keras.layers.DenseFeatures(columns)\n\n features = tf.io.parse_example(\n ..., features=tf.feature_column.make_parse_example_spec(columns))\n dense_tensor = feature_layer(features)\n for units in [128, 64, 32]:\n dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)\n prediction = tf.keras.layers.Dense(1)(dense_tensor)\n ```\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "data_format", + "name": "feature_columns", + "default": null + }, + { + "name": "trainable", + "default": "True", + "type": "boolean" + }, + { + "name": "name", "default": "None" } ], @@ -1811,33 +1873,63 @@ "default": null }, { - "name": "inputs", + "name": "features", "default": null + }, + { + "name": "cols_to_output_tensors", + "default": "None" + }, + { + "name": "training", + "default": "None" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/feature_column/dense_features_v2.py", "aliases": [] }, { - "name": "GRU", - "base": "DropoutRNNCellMixin", - "docstring": "Gated Recurrent Unit - Cho et al. 2014.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Based on available runtime hardware and constraints, this layer\n will choose different implementations (cuDNN-based or pure-TensorFlow)\n to maximize the performance. If a GPU is available and all\n the arguments to the layer meet the requirement of the CuDNN kernel\n (see below for details), the layer will use a fast cuDNN implementation.\n\n The requirements to use the cuDNN implementation are:\n\n 1. `activation` == `tanh`\n 2. `recurrent_activation` == `sigmoid`\n 3. 
`recurrent_dropout` == 0\n 4. `unroll` is `False`\n 5. `use_bias` is `True`\n 6. `reset_after` is `True`\n 7. Inputs, if use masking, are strictly right-padded.\n 8. Eager execution is enabled in the outermost context.\n\n There are two variants of the GRU implementation. The default one is based on\n [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden\n state before matrix multiplication. The other one is based on\n [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.\n\n The second variant is compatible with CuDNNGRU (GPU-only) and allows\n inference on CPU. Thus it has separate biases for `kernel` and\n `recurrent_kernel`. To use this variant, set `'reset_after'=True` and\n `recurrent_activation='sigmoid'`.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> gru = tf.keras.layers.GRU(4)\n >>> output = gru(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)\n >>> whole_sequence_output, final_state = gru(inputs)\n >>> print(whole_sequence_output.shape)\n (32, 10, 4)\n >>> print(final_state.shape)\n (32, 4)\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent\n state. 
Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications. Default: 2.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.\n return_state: Boolean. Whether to return the last state in addition to the\n output. Default: `False`.\n go_backwards: Boolean (default `False`).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `[timesteps, batch, feature]`, whereas in the False case, it will be\n `[batch, timesteps, feature]`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n reset_after: GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\",\n True = \"after\" (default and CuDNN compatible).\n\n Call arguments:\n inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[samples, timesteps]` indicating whether\n a given timestep should be masked (optional, defaults to `None`).\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or\n `recurrent_dropout` is used (optional, defaults to `None`).\n initial_state: List of initial state tensors to be passed to the first\n call of the cell (optional, defaults to `None` which causes creation\n of zero-filled initial state tensors).\n ", + "name": "DepthwiseConv2D", + "base": "Conv2D", + "docstring": "Depthwise 2D convolution.\n\n Depthwise convolution is a type of convolution in which a single convolutional\n filter is apply to each input channel (i.e. 
in a depthwise way).\n You can understand depthwise convolution as being\n the first step in a depthwise separable convolution.\n\n It is implemented via the following steps:\n\n - Split the input into individual channels.\n - Convolve each input with the layer's kernel (called a depthwise kernel).\n - Stack the convolved outputs together (along the channels axis).\n\n Unlike a regular 2D convolution, depthwise convolution does not mix\n information across different input channels.\n\n The `depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Args:\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `'valid'` or `'same'` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be 'channels_last'.\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix (\n see `keras.initializers`). If None, the default initializer (\n 'glorot_uniform') will be used.\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`). 
If None, the default initializer (\n 'zeros') will be used.\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its 'activation') (\n see `keras.regularizers`).\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `[batch_size, channels, rows, cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch_size, rows, cols, channels]` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `[batch_size, channels * depth_multiplier, new_rows, new_cols]` if\n data_format='channels_first' or 4D tensor with shape:\n `[batch_size, new_rows, new_cols, channels * depth_multiplier]` if\n data_format='channels_last'. 
`rows` and `cols` values might have\n changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(depthwiseconv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "units", + "name": "kernel_size", "default": null }, { - "name": "activation", - "default": "tanh" + "name": "strides", + "default": [ + 1, + 1 + ] }, { - "name": "recurrent_activation", - "default": "sigmoid" + "name": "padding", + "default": "valid" + }, + { + "name": "depth_multiplier", + "default": 1 + }, + { + "name": "data_format", + "default": "None" + }, + { + "name": "dilation_rate", + "default": [ + 1, + 1 + ] + }, + { + "name": "activation", + "default": "None" }, { "name": "use_bias", @@ -1845,23 +1937,15 @@ "type": "boolean" }, { - "name": "kernel_initializer", + "name": "depthwise_initializer", "default": "glorot_uniform" }, - { - "name": "recurrent_initializer", - "default": "orthogonal" - }, { "name": "bias_initializer", "default": "zeros" }, { - "name": "kernel_regularizer", - "default": "None" - }, - { - "name": "recurrent_regularizer", + "name": "depthwise_regularizer", "default": "None" }, { @@ -1873,63 +1957,49 @@ "default": "None" }, { - "name": "kernel_constraint", - "default": "None" - }, - { - "name": "recurrent_constraint", + "name": "depthwise_constraint", "default": "None" }, { "name": "bias_constraint", "default": "None" - }, - { - "name": "dropout", - "default": 0.0 - }, - { - "name": "recurrent_dropout", - "default": 0.0 - }, - { - "name": "implementation", - "default": 2 - }, - { - "name": "return_sequences", - "default": "False", - "type": "boolean" - }, + } + ], + "abstract": false, + "outputs": [], + "inputs": [ { - "name": "return_state", - "default": "False", - "type": "boolean" + "name": "self", + "default": null }, { - "name": "go_backwards", - "default": "False", - "type": 
"boolean" - }, + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/convolutional.py", + "aliases": [] + }, + { + "name": "Discretization", + "base": "PreprocessingLayer", + "docstring": "Buckets data into discrete ranges.\n\n This layer will place each element of its input data into one of several\n contiguous ranges and output an integer index indicating which range each\n element was placed in.\n\n Input shape:\n Any `tf.Tensor` or `tf.RaggedTensor` of dimension 2 or higher.\n\n Output shape:\n Same as input shape.\n\n Attributes:\n bin_boundaries: A list of bin boundaries. The leftmost and rightmost bins\n will always extend to `-inf` and `inf`, so `bin_boundaries=[0., 1., 2.]`\n generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. If\n this option is set, `adapt` should not be called.\n num_bins: The integer number of bins to compute. If this option is set,\n `adapt` should be called to learn the bin boundaries.\n epsilon: Error tolerance, typically a small fraction close to zero (e.g.\n 0.01). 
Higher values of epsilon increase the quantile approximation, and\n hence result in more unequal buckets, but could improve performance\n and resource consumption.\n\n Examples:\n\n Bucketize float values based on provided buckets.\n >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])\n >>> layer = tf.keras.layers.Discretization(bin_boundaries=[0., 1., 2.])\n >>> layer(input)\n \n\n Bucketize float values based on a number of buckets to compute.\n >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])\n >>> layer = tf.keras.layers.Discretization(num_bins=4, epsilon=0.01)\n >>> layer.adapt(input)\n >>> layer(input)\n \n ", + "arguments": [ { - "name": "stateful", - "default": "False", - "type": "boolean" + "name": "self", + "default": null }, { - "name": "unroll", - "default": "False", - "type": "boolean" + "name": "bin_boundaries", + "default": "None" }, { - "name": "time_major", - "default": "False", - "type": "boolean" + "name": "num_bins", + "default": "None" }, { - "name": "reset_after", - "default": "True", - "type": "boolean" + "name": "epsilon", + "default": 0.01 } ], "abstract": false, @@ -1942,101 +2012,65 @@ { "name": "inputs", "default": null - }, - { - "name": "mask", - "default": "None" - }, - { - "name": "training", - "default": "None" - }, - { - "name": "initial_state", - "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent_v2.py", + "file": "keras/layers/preprocessing/discretization.py", "aliases": [] }, { - "name": "GRUCell", - "base": "GRUCell", - "docstring": "Cell class for the GRU layer.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole time sequence input, whereas\n `tf.keras.layer.GRU` processes the whole sequence.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))\n >>> output = rnn(inputs)\n >>> 
print(output.shape)\n (32, 4)\n >>> rnn = tf.keras.layers.RNN(\n ... tf.keras.layers.GRUCell(4),\n ... return_sequences=True,\n ... return_state=True)\n >>> whole_sequence_output, final_state = rnn(inputs)\n >>> print(whole_sequence_output.shape)\n (32, 10, 4)\n >>> print(final_state.shape)\n (32, 4)\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use. Default: hyperbolic tangent\n (`tanh`). If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is\n applied (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. 
Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 (default) will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications. Default: 2.\n reset_after: GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\",\n True = \"after\" (default and CuDNN compatible).\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: A 2D tensor with shape of `[batch, units]`, which is the state from\n the previous time step. For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n ", + "name": "Dot", + "base": "_Merge", + "docstring": "Layer that computes a dot product between samples in two tensors.\n\n E.g. 
if applied to a list of two tensors `a` and `b` of shape\n `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`\n where each entry `i` will be the dot product between\n `a[i]` and `b[i]`.\n\n >>> x = np.arange(10).reshape(1, 5, 2)\n >>> print(x)\n [[[0 1]\n [2 3]\n [4 5]\n [6 7]\n [8 9]]]\n >>> y = np.arange(10, 20).reshape(1, 2, 5)\n >>> print(y)\n [[[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2])\n >>> dotted.shape\n TensorShape([5, 1])\n\n\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "units", + "name": "axes", "default": null }, { - "name": "activation", - "default": "tanh" - }, - { - "name": "recurrent_activation", - "default": "sigmoid" - }, - { - "name": "use_bias", - "default": "True", + "name": "normalize", + "default": "False", "type": "boolean" - }, - { - "name": "kernel_initializer", - "default": "glorot_uniform" - }, - { - "name": "recurrent_initializer", - "default": "orthogonal" - }, - { - "name": "bias_initializer", - "default": "zeros" - }, + } + ], + "abstract": false, + "outputs": [], + "inputs": [ { - "name": "kernel_regularizer", - "default": "None" + "name": "self", + "default": null }, { - "name": "recurrent_regularizer", - "default": "None" - }, + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/merge.py", + "aliases": [] + }, + { + "name": "Dropout", + "base": "Layer", + "docstring": "Applies Dropout to the input.\n\n The Dropout layer randomly sets input units to 0 with a frequency of `rate`\n at each step during training time, which helps prevent overfitting.\n Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over\n all inputs is unchanged.\n\n Note that the Dropout layer only applies when `training` is set to True\n such that no 
values are dropped during inference. When using `model.fit`,\n `training` will be appropriately set to True automatically, and in other\n contexts, you can set the kwarg explicitly to True when calling the layer.\n\n (This is in contrast to setting `trainable=False` for a Dropout layer.\n `trainable` does not affect the layer's behavior, as Dropout does\n not have any variables/weights that can be frozen during training.)\n\n >>> tf.random.set_seed(0)\n >>> layer = tf.keras.layers.Dropout(.2, input_shape=(2,))\n >>> data = np.arange(10).reshape(5, 2).astype(np.float32)\n >>> print(data)\n [[0. 1.]\n [2. 3.]\n [4. 5.]\n [6. 7.]\n [8. 9.]]\n >>> outputs = layer(data, training=True)\n >>> print(outputs)\n tf.Tensor(\n [[ 0. 1.25]\n [ 2.5 3.75]\n [ 5. 6.25]\n [ 7.5 8.75]\n [10. 0. ]], shape=(5, 2), dtype=float32)\n\n Args:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n ", + "arguments": [ { - "name": "bias_regularizer", - "default": "None" + "name": "self", + "default": null }, { - "name": "kernel_constraint", - "default": "None" + "name": "rate", + "default": null }, { - "name": "recurrent_constraint", + "name": "noise_shape", "default": "None" }, { - "name": "bias_constraint", + "name": "seed", "default": "None" - }, - { - "name": "dropout", - "default": 0.0 - }, - { - "name": "recurrent_dropout", - "default": 0.0 - }, - { - "name": "implementation", - "default": 2 - }, - { - 
"name": "reset_after", - "default": "True", - "type": "boolean" } ], "abstract": false, @@ -2050,30 +2084,26 @@ "name": "inputs", "default": null }, - { - "name": "states", - "default": null - }, { "name": "training", "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent_v2.py", + "file": "keras/layers/core.py", "aliases": [] }, { - "name": "GaussianDropout", + "name": "ELU", "base": "Layer", - "docstring": "Apply multiplicative 1-centered Gaussian noise.\n\n As it is a regularization layer, it is only active at training time.\n\n Arguments:\n rate: Float, drop probability (as with `Dropout`).\n The multiplicative noise will have\n standard deviation `sqrt(rate / (1 - rate))`.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", + "docstring": "Exponential Linear Unit.\n\n It follows:\n\n ```\n f(x) = alpha * (exp(x) - 1.) for x < 0\n f(x) = x for x >= 0\n ```\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n alpha: Scale for the negative factor.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "rate", - "default": null + "name": "alpha", + "default": 1.0 } ], "abstract": false, @@ -2086,27 +2116,52 @@ { "name": "inputs", "default": null - }, - { - "name": "training", - "default": "None" } ], - "file": "tensorflow/python/keras/layers/noise.py", + "file": "keras/layers/advanced_activations.py", "aliases": [] }, { - "name": "GaussianNoise", + "name": "Embedding", "base": "Layer", - "docstring": "Apply additive zero-centered Gaussian noise.\n\n This is useful to mitigate overfitting\n (you could see it as a form of random data augmentation).\n Gaussian Noise (GS) is a natural choice as corruption process\n for real valued inputs.\n\n As it is a regularization layer, it is only active at training time.\n\n Arguments:\n stddev: Float, standard deviation of the noise distribution.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding noise) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", + "docstring": "Turns positive integers (indexes) into dense vectors of fixed size.\n\n e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`\n\n This layer can only be used as the first layer in a model.\n\n Example:\n\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Embedding(1000, 64, input_length=10))\n >>> # The model will take as input an integer matrix of size (batch,\n >>> # input_length), and the largest integer (i.e. 
word index) in the input\n >>> # should be no larger than 999 (vocabulary size).\n >>> # Now model.output_shape is (None, 10, 64), where `None` is the batch\n >>> # dimension.\n >>> input_array = np.random.randint(1000, size=(32, 10))\n >>> model.compile('rmsprop', 'mse')\n >>> output_array = model.predict(input_array)\n >>> print(output_array.shape)\n (32, 10, 64)\n\n Args:\n input_dim: Integer. Size of the vocabulary,\n i.e. maximum integer index + 1.\n output_dim: Integer. Dimension of the dense embedding.\n embeddings_initializer: Initializer for the `embeddings`\n matrix (see `keras.initializers`).\n embeddings_regularizer: Regularizer function applied to\n the `embeddings` matrix (see `keras.regularizers`).\n embeddings_constraint: Constraint function applied to\n the `embeddings` matrix (see `keras.constraints`).\n mask_zero: Boolean, whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n This is useful when using recurrent layers\n which may take variable length input.\n If this is `True`, then all subsequent layers\n in the model need to support masking or an exception will be raised.\n If mask_zero is set to True, as a consequence, index 0 cannot be\n used in the vocabulary (input_dim should equal size of\n vocabulary + 1).\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n\n Input shape:\n 2D tensor with shape: `(batch_size, input_length)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, input_length, output_dim)`.\n\n **Note on variable placement:**\n By default, if a GPU is available, the embedding matrix will be placed on\n the GPU. 
This achieves the best performance, but it might cause issues:\n\n - You may be using an optimizer that does not support sparse GPU kernels.\n In this case you will see an error upon training your model.\n - Your embedding matrix may be too large to fit on your GPU. In this case\n you will see an Out Of Memory (OOM) error.\n\n In such cases, you should place the embedding matrix on the CPU memory.\n You can do so with a device scope, as such:\n\n ```python\n with tf.device('cpu:0'):\n embedding_layer = Embedding(...)\n embedding_layer.build()\n ```\n\n The pre-built `embedding_layer` instance can then be added to a `Sequential`\n model (e.g. `model.add(embedding_layer)`), called in a Functional model\n (e.g. `x = embedding_layer(x)`), or used in a subclassed model.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "stddev", + "name": "input_dim", + "default": null + }, + { + "name": "output_dim", "default": null + }, + { + "name": "embeddings_initializer", + "default": "uniform" + }, + { + "name": "embeddings_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "embeddings_constraint", + "default": "None" + }, + { + "name": "mask_zero", + "default": "False", + "type": "boolean" + }, + { + "name": "input_length", + "default": "None" } ], "abstract": false, @@ -2119,19 +2174,15 @@ { "name": "inputs", "default": null - }, - { - "name": "training", - "default": "None" } ], - "file": "tensorflow/python/keras/layers/noise.py", + "file": "keras/layers/embeddings.py", "aliases": [] }, { - "name": "GlobalAveragePooling1D", - "base": "GlobalPooling1D", - "docstring": "Global average pooling operation for temporal data.\n\n Examples:\n\n >>> input_shape = (2, 3, 4)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.GlobalAveragePooling1D()(x)\n >>> print(y.shape)\n (2, 4)\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering 
of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Call arguments:\n inputs: A 3D tensor.\n mask: Binary tensor of shape `(batch_size, steps)` indicating whether\n a given step should be masked (excluded from the average).\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n - If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n\n Output shape:\n 2D tensor with shape `(batch_size, features)`.\n ", + "name": "Flatten", + "base": "Layer", + "docstring": "Flattens the input. Does not affect the batch size.\n\n Note: If inputs are shaped `(batch,)` without a feature axis, then\n flattening adds an extra channel dimension and output shape is `(batch, 1)`.\n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Example:\n\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Conv2D(64, 3, 3, input_shape=(3, 32, 32)))\n >>> model.output_shape\n (None, 1, 10, 64)\n\n >>> model.add(Flatten())\n >>> model.output_shape\n (None, 640)\n\n ", "arguments": [ { "name": "self", @@ -2139,166 +2190,7 @@ }, { "name": "data_format", - "default": "channels_last" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - }, - { - "name": "mask", - "default": "None" - } - ], - "file": 
"tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "GlobalAvgPool1D" - ] - }, - { - "name": "GlobalAveragePooling2D", - "base": "GlobalPooling2D", - "docstring": "Global average pooling operation for spatial data.\n\n Examples:\n\n >>> input_shape = (2, 4, 5, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.GlobalAveragePooling2D()(x)\n >>> print(y.shape)\n (2, 3)\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n 2D tensor with shape `(batch_size, channels)`.\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "data_format", - "default": "None" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "GlobalAvgPool2D" - ] - }, - { - "name": "GlobalAveragePooling3D", - "base": "GlobalPooling3D", - "docstring": "Global Average pooling operation for 3D data.\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n 
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n 2D tensor with shape `(batch_size, channels)`.\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "data_format", - "default": "None" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "GlobalAvgPool3D" - ] - }, - { - "name": "GlobalMaxPooling1D", - "base": "GlobalPooling1D", - "docstring": "Global max pooling operation for 1D temporal data.\n\n Downsamples the input representation by taking the maximum value over\n the time dimension.\n\n For example:\n\n >>> x = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])\n >>> x = tf.reshape(x, [3, 3, 1])\n >>> x\n \n >>> max_pool_1d = tf.keras.layers.GlobalMaxPooling1D()\n >>> max_pool_1d(x)\n \n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n - If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n\n Output shape:\n 2D tensor with shape `(batch_size, features)`.\n ", - "arguments": [ - 
{ - "name": "self", - "default": null - }, - { - "name": "data_format", - "default": "channels_last" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "GlobalMaxPool1D" - ] - }, - { - "name": "GlobalMaxPooling2D", - "base": "GlobalPooling2D", - "docstring": "Global max pooling operation for spatial data.\n\n Examples:\n\n >>> input_shape = (2, 4, 5, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.GlobalMaxPool2D()(x)\n >>> print(y.shape)\n (2, 3)\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n 2D tensor with shape `(batch_size, channels)`.\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "data_format", - "default": "None" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - } - ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "GlobalMaxPool2D" - ] - }, - { - "name": "GlobalMaxPooling3D", - "base": "GlobalPooling3D", - "docstring": "Global Max pooling operation for 3D data.\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or 
`channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n 2D tensor with shape `(batch_size, channels)`.\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "data_format", - "default": "None" + "default": "None" } ], "abstract": false, @@ -2313,15 +2205,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "GlobalMaxPool3D" - ] + "file": "keras/layers/core.py", + "aliases": [] }, { - "name": "LSTM", + "name": "GRU", "base": "DropoutRNNCellMixin", - "docstring": "Long Short-Term Memory layer - Hochreiter 1997.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Based on available runtime hardware and constraints, this layer\n will choose different implementations (cuDNN-based or pure-TensorFlow)\n to maximize the performance. If a GPU is available and all\n the arguments to the layer meet the requirement of the CuDNN kernel\n (see below for details), the layer will use a fast cuDNN implementation.\n\n The requirements to use the cuDNN implementation are:\n\n 1. `activation` == `tanh`\n 2. `recurrent_activation` == `sigmoid`\n 3. `recurrent_dropout` == 0\n 4. `unroll` is `False`\n 5. `use_bias` is `True`\n 6. 
Inputs, if use masking, are strictly right-padded.\n 7. Eager execution is enabled in the outermost context.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> lstm = tf.keras.layers.LSTM(4)\n >>> output = lstm(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)\n >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)\n >>> print(whole_seq_output.shape)\n (32, 10, 4)\n >>> print(final_memory_state.shape)\n (32, 4)\n >>> print(final_carry_state.shape)\n (32, 4)\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation\n is applied (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is\n applied (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs. Default: `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of\n the forget gate at initialization. Setting it to true will also force\n `bias_initializer=\"zeros\"`. This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. 
Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n implementation: Implementation mode, either 1 or 2. Mode 1 will structure\n its operations as a larger number of smaller dot products and additions,\n whereas mode 2 will batch them into fewer, larger operations. These modes\n will have different performance profiles on different hardware and for\n different applications. Default: 2.\n return_sequences: Boolean. Whether to return the last output. in the output\n sequence, or the full sequence. Default: `False`.\n return_state: Boolean. Whether to return the last state in addition to the\n output. Default: `False`.\n go_backwards: Boolean (default `False`). If True, process the input sequence\n backwards and return the reversed sequence.\n stateful: Boolean (default `False`). If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `[timesteps, batch, feature]`, whereas in the False case, it will be\n `[batch, timesteps, feature]`. 
Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n unroll: Boolean (default `False`). If True, the network will be unrolled,\n else a symbolic loop will be used. Unrolling can speed-up a RNN, although\n it tends to be more memory-intensive. Unrolling is only suitable for short\n sequences.\n\n Call arguments:\n inputs: A 3D tensor with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[batch, timesteps]` indicating whether\n a given timestep should be masked (optional, defaults to `None`).\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or\n `recurrent_dropout` is used (optional, defaults to `None`).\n initial_state: List of initial state tensors to be passed to the first\n call of the cell (optional, defaults to `None` which causes creation\n of zero-filled initial state tensors).\n ", + "docstring": "Gated Recurrent Unit - Cho et al. 2014.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Based on available runtime hardware and constraints, this layer\n will choose different implementations (cuDNN-based or pure-TensorFlow)\n to maximize the performance. If a GPU is available and all\n the arguments to the layer meet the requirement of the CuDNN kernel\n (see below for details), the layer will use a fast cuDNN implementation.\n\n The requirements to use the cuDNN implementation are:\n\n 1. `activation` == `tanh`\n 2. `recurrent_activation` == `sigmoid`\n 3. `recurrent_dropout` == 0\n 4. `unroll` is `False`\n 5. `use_bias` is `True`\n 6. `reset_after` is `True`\n 7. 
Inputs, if use masking, are strictly right-padded.\n 8. Eager execution is enabled in the outermost context.\n\n There are two variants of the GRU implementation. The default one is based on\n [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to hidden\n state before matrix multiplication. The other one is based on\n [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.\n\n The second variant is compatible with CuDNNGRU (GPU-only) and allows\n inference on CPU. Thus it has separate biases for `kernel` and\n `recurrent_kernel`. To use this variant, set `'reset_after'=True` and\n `recurrent_activation='sigmoid'`.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> gru = tf.keras.layers.GRU(4)\n >>> output = gru(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)\n >>> whole_sequence_output, final_state = gru(inputs)\n >>> print(whole_sequence_output.shape)\n (32, 10, 4)\n >>> print(final_state.shape)\n (32, 4)\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent\n state. Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. 
Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.\n return_state: Boolean. Whether to return the last state in addition to the\n output. Default: `False`.\n go_backwards: Boolean (default `False`).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `[timesteps, batch, feature]`, whereas in the False case, it will be\n `[batch, timesteps, feature]`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n reset_after: GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\",\n True = \"after\" (default and CuDNN compatible).\n\n Call arguments:\n inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[samples, timesteps]` indicating whether\n a given timestep should be masked (optional, defaults to `None`).\n An individual `True` entry indicates that the corresponding timestep\n should be utilized, while a `False` entry indicates that the\n corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. 
This is only relevant if `dropout` or\n `recurrent_dropout` is used (optional, defaults to `None`).\n initial_state: List of initial state tensors to be passed to the first\n call of the cell (optional, defaults to `None` which causes creation\n of zero-filled initial state tensors).\n ", "arguments": [ { "name": "self", @@ -2356,11 +2246,6 @@ "name": "bias_initializer", "default": "zeros" }, - { - "name": "unit_forget_bias", - "default": "True", - "type": "boolean" - }, { "name": "kernel_regularizer", "default": "None" @@ -2397,10 +2282,6 @@ "name": "recurrent_dropout", "default": 0.0 }, - { - "name": "implementation", - "default": 2 - }, { "name": "return_sequences", "default": "False", @@ -2422,14 +2303,19 @@ "type": "boolean" }, { - "name": "time_major", + "name": "unroll", "default": "False", "type": "boolean" }, { - "name": "unroll", + "name": "time_major", "default": "False", "type": "boolean" + }, + { + "name": "reset_after", + "default": "True", + "type": "boolean" } ], "abstract": false, @@ -2456,13 +2342,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent_v2.py", + "file": "keras/layers/recurrent_v2.py", "aliases": [] }, { - "name": "LSTMCell", - "base": "LSTMCell", - "docstring": "Cell class for the LSTM layer.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole time sequence input, whereas\n `tf.keras.layer.LSTM` processes the whole sequence.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4))\n >>> output = rnn(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> rnn = tf.keras.layers.RNN(\n ... tf.keras.layers.LSTMCell(4),\n ... return_sequences=True,\n ... 
return_state=True)\n >>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs)\n >>> print(whole_seq_output.shape)\n (32, 10, 4)\n >>> print(final_memory_state.shape)\n (32, 4)\n >>> print(final_carry_state.shape)\n (32, 4)\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use. Default: hyperbolic tangent\n (`tanh`). If you pass `None`, no activation is applied (ie. \"linear\"\n activation: `a(x) = x`).\n recurrent_activation: Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs. Default: `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of\n the forget gate at initialization. Setting it to true will also force\n `bias_initializer=\"zeros\"`. This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. 
Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of smaller dot\n products and additions, whereas mode 2 (default) will batch them into\n fewer, larger operations. These modes will have different performance\n profiles on different hardware and for different applications. Default: 2.\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: List of 2 tensors that corresponding to the cell's units. Both of\n them have shape `[batch, units]`, the first tensor is the memory state\n from previous time step, the second tensor is the carry state from\n previous time step. For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n ", + "name": "GRUCell", + "base": "GRUCell", + "docstring": "Cell class for the GRU layer.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole time sequence input, whereas\n `tf.keras.layer.GRU` processes the whole sequence.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))\n >>> output = rnn(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> rnn = tf.keras.layers.RNN(\n ... tf.keras.layers.GRUCell(4),\n ... return_sequences=True,\n ... 
return_state=True)\n >>> whole_sequence_output, final_state = rnn(inputs)\n >>> print(whole_sequence_output.shape)\n (32, 10, 4)\n >>> print(final_state.shape)\n (32, 4)\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use. Default: hyperbolic tangent\n (`tanh`). If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is\n applied (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n linear transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. 
Default: 0.\n reset_after: GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\",\n True = \"after\" (default and CuDNN compatible).\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: A 2D tensor with shape of `[batch, units]`, which is the state from\n the previous time step. For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n ", "arguments": [ { "name": "self", @@ -2497,11 +2383,6 @@ "name": "bias_initializer", "default": "zeros" }, - { - "name": "unit_forget_bias", - "default": "True", - "type": "boolean" - }, { "name": "kernel_regularizer", "default": "None" @@ -2535,8 +2416,9 @@ "default": 0.0 }, { - "name": "implementation", - "default": 2 + "name": "reset_after", + "default": "True", + "type": "boolean" } ], "abstract": false, @@ -2559,33 +2441,54 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent_v2.py", + "file": "keras/layers/recurrent_v2.py", "aliases": [] }, { - "name": "Lambda", + "name": "GaussianDropout", "base": "Layer", - "docstring": "Wraps arbitrary expressions as a `Layer` object.\n\n The `Lambda` layer exists so that arbitrary TensorFlow functions\n can be used when constructing `Sequential` and Functional API\n models. `Lambda` layers are best suited for simple operations or\n quick experimentation. For more advanced use cases, follow\n [this guide](https://www.tensorflow.org/guide/keras/custom_layers_and_models)\n for subclassing `tf.keras.layers.Layer`.\n\n The main reason to subclass `tf.keras.layers.Layer` instead of using a\n `Lambda` layer is saving and inspecting a Model. `Lambda` layers\n are saved by serializing the Python bytecode, whereas subclassed\n Layers can be saved via overriding their `get_config` method. 
Overriding\n `get_config` improves the portability of Models. Models that rely on\n subclassed Layers are also often easier to visualize and reason about.\n\n Examples:\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n model.add(Lambda(antirectifier))\n ```\n\n Variables:\n While it is possible to use Variables with Lambda layers, this practice is\n discouraged as it can easily lead to bugs. For instance, consider the\n following layer:\n\n ```python\n scale = tf.Variable(1.)\n scale_layer = tf.keras.layers.Lambda(lambda x: x * scale)\n ```\n\n Because scale_layer does not directly track the `scale` variable, it will\n not appear in `scale_layer.trainable_weights` and will therefore not be\n trained if `scale_layer` is used in a Model.\n\n A better pattern is to write a subclassed Layer:\n\n ```python\n class ScaleLayer(tf.keras.layers.Layer):\n def __init__(self):\n super(ScaleLayer, self).__init__()\n self.scale = tf.Variable(1.)\n\n def call(self, inputs):\n return inputs * self.scale\n ```\n\n In general, Lambda layers can be convenient for simple stateless\n computation, but anything more complex should use a subclass Layer instead.\n\n Arguments:\n function: The function to be evaluated. Takes input tensor as first\n argument.\n output_shape: Expected output shape from function. This argument can be\n inferred if not explicitly provided. Can be a tuple or function. 
If a\n tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input: `output_shape =\n (input_shape[0], ) + output_shape` or, the input is `None` and\n the sample dimension is also `None`: `output_shape = (None, ) +\n output_shape` If a function, it specifies the entire shape as a function\n of the\n input shape: `output_shape = f(input_shape)`\n mask: Either None (indicating no masking) or a callable with the same\n signature as the `compute_mask` layer method, or a tensor that will be\n returned as output mask regardless of what the input is.\n arguments: Optional dictionary of keyword arguments to be passed to the\n function.\n\n Input shape:\n Arbitrary. Use the keyword argument input_shape (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n\n Output shape:\n Specified by `output_shape` argument\n ", + "docstring": "Apply multiplicative 1-centered Gaussian noise.\n\n As it is a regularization layer, it is only active at training time.\n\n Args:\n rate: Float, drop probability (as with `Dropout`).\n The multiplicative noise will have\n standard deviation `sqrt(rate / (1 - rate))`.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "function", + "name": "rate", + "default": null + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", "default": null }, { - "name": "output_shape", - "default": "None" + "name": "inputs", + "default": null }, { - "name": "mask", + "name": "training", "default": "None" + } + ], + "file": "keras/layers/noise.py", + "aliases": [] + }, + { + "name": "GaussianNoise", + "base": "Layer", + "docstring": "Apply additive zero-centered Gaussian noise.\n\n This is useful to mitigate overfitting\n (you could see it as a form of random data augmentation).\n Gaussian Noise (GS) is a natural choice as corruption process\n for real valued inputs.\n\n As it is a regularization layer, it is only active at training time.\n\n Args:\n stddev: Float, standard deviation of the noise distribution.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding noise) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n ", + "arguments": [ + { + "name": "self", + "default": null }, { - "name": "arguments", - "default": "None" + "name": "stddev", + "default": null } ], "abstract": false, @@ -2599,77 +2502,174 @@ "name": "inputs", "default": null }, - { - "name": "mask", - "default": "None" - }, { "name": "training", "default": "None" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/noise.py", "aliases": [] }, { - "name": "LayerNormalization", - "base": "Layer", - "docstring": "Layer normalization layer (Ba et al., 2016).\n\n Normalize the activations of the previous layer for each given example in a\n batch independently, rather than across a batch like Batch Normalization.\n i.e. applies a transformation that maintains the mean activation within each\n example close to 0 and the activation standard deviation close to 1.\n\n Given a tensor `inputs`, moments are calculated and normalization\n is performed across the axes specified in `axis`.\n\n Example:\n\n >>> data = tf.constant(np.arange(10).reshape(5, 2) * 10, dtype=tf.float32)\n >>> print(data)\n tf.Tensor(\n [[ 0. 10.]\n [20. 30.]\n [40. 50.]\n [60. 70.]\n [80. 90.]], shape=(5, 2), dtype=float32)\n\n >>> layer = tf.keras.layers.LayerNormalization(axis=1)\n >>> output = layer(data)\n >>> print(output)\n tf.Tensor(\n [[-1. 1.]\n [-1. 1.]\n [-1. 1.]\n [-1. 1.]\n [-1. 1.]], shape=(5, 2), dtype=float32)\n\n Notice that with Layer Normalization the normalization happens across the\n axes *within* each example, rather than across different examples in the\n batch.\n\n If `scale` or `center` are enabled, the layer will scale the normalized\n outputs by broadcasting them with a trainable variable `gamma`, and center\n the outputs by broadcasting with a trainable variable `beta`. 
`gamma` will\n default to a ones tensor and `beta` will default to a zeros tensor, so that\n centering and scaling are no-ops before training has begun.\n\n So, with scaling and centering enabled the normalization equations\n are as follows:\n Let the intermediate activations for a mini-batch to be the `inputs`.\n\n For each sample `x_i` in `inputs` with `k` features, we compute the mean and\n variance of the sample:\n\n ```python\n mean_i = sum(x_i[j] for j in range(k)) / k\n var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k\n ```\n\n and then compute a normalized `x_i_normalized`, including a small factor\n `epsilon` for numerical stability.\n\n ```python\n x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)\n ```\n\n And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,\n which are learned parameters:\n\n ```python\n output_i = x_i_normalized * gamma + beta\n ```\n\n `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and\n this part of the inputs' shape must be fully defined.\n\n For example:\n\n >>> layer = tf.keras.layers.LayerNormalization(axis=[1, 2, 3])\n >>> layer.build([5, 20, 30, 40])\n >>> print(layer.beta.shape)\n (20, 30, 40)\n >>> print(layer.gamma.shape)\n (20, 30, 40)\n\n Note that other implementations of layer normalization may choose to define\n `gamma` and `beta` over a separate set of axes from the axes being\n normalized across. For example, Group Normalization\n ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1\n corresponds to a Layer Normalization that normalizes across height, width,\n and channel and has `gamma` and `beta` span only the channel dimension.\n So, this Layer Normalization implementation will not match a Group\n Normalization layer with group size set to 1.\n\n\n Arguments:\n axis: Integer or List/Tuple. The axis or axes to normalize across. Typically\n this is the features axis/axes. The left-out axes are typically the batch\n axis/axes. 
This argument defaults to `-1`, the last dimension in the\n input.\n epsilon: Small float added to variance to avoid dividing by zero. Defaults\n to 1e-3\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored. Defaults to True.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used. Defaults\n to True. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling will be done by the next layer.\n beta_initializer: Initializer for the beta weight. Defaults to zeros.\n gamma_initializer: Initializer for the gamma weight. Defaults to ones.\n beta_regularizer: Optional regularizer for the beta weight. None by default.\n gamma_regularizer: Optional regularizer for the gamma weight. None by\n default.\n beta_constraint: Optional constraint for the beta weight. None by default.\n gamma_constraint: Optional constraint for the gamma weight. None by default.\n trainable: Boolean, if `True` the variables will be marked as trainable.\n Defaults to True.\n Input shape: Arbitrary. 
Use the keyword argument `input_shape` (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n Output shape: Same shape as input.\n Reference:\n - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).\n ", + "name": "GlobalAveragePooling1D", + "base": "GlobalPooling1D", + "docstring": "Global average pooling operation for temporal data.\n\n Examples:\n\n >>> input_shape = (2, 3, 4)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.GlobalAveragePooling1D()(x)\n >>> print(y.shape)\n (2, 4)\n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n keepdims: A boolean, whether to keep the temporal dimension or not.\n If `keepdims` is `False` (default), the rank of the tensor is reduced\n for spatial dimensions.\n If `keepdims` is `True`, the temporal dimension are retained with\n length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`.\n\n Call arguments:\n inputs: A 3D tensor.\n mask: Binary tensor of shape `(batch_size, steps)` indicating whether\n a given step should be masked (excluded from the average).\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n - If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n\n Output shape:\n - If `keepdims`=False:\n 2D tensor with shape `(batch_size, features)`.\n - If `keepdims`=True:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, 1, features)`\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, 1)`\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "axis", - "default": -1 - }, - { - "name": "epsilon", - 
"default": 0.001 - }, - { - "name": "center", - "default": "True", - "type": "boolean" - }, + "name": "data_format", + "default": "channels_last" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ { - "name": "scale", - "default": "True", - "type": "boolean" + "name": "self", + "default": null }, { - "name": "beta_initializer", - "default": "zeros" + "name": "inputs", + "default": null }, { - "name": "gamma_initializer", - "default": "ones" + "name": "mask", + "default": "None" + } + ], + "file": "keras/layers/pooling.py", + "aliases": [ + "GlobalAvgPool1D" + ] + }, + { + "name": "GlobalAveragePooling2D", + "base": "GlobalPooling2D", + "docstring": "Global average pooling operation for spatial data.\n\n Examples:\n\n >>> input_shape = (2, 4, 5, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.GlobalAveragePooling2D()(x)\n >>> print(y.shape)\n (2, 3)\n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n keepdims: A boolean, whether to keep the spatial dimensions or not.\n If `keepdims` is `False` (default), the rank of the tensor is reduced\n for spatial dimensions.\n If `keepdims` is `True`, the spatial dimensions are retained with\n length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n - If `keepdims`=False:\n 2D tensor with shape `(batch_size, 
channels)`.\n - If `keepdims`=True:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, 1, 1, channels)`\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, 1, 1)`\n ", + "arguments": [ + { + "name": "self", + "default": null }, { - "name": "beta_regularizer", + "name": "data_format", "default": "None" }, { - "name": "gamma_regularizer", - "default": "None" + "name": "keepdims", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null }, { - "name": "beta_constraint", - "default": "None" + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/pooling.py", + "aliases": [ + "GlobalAvgPool2D" + ] + }, + { + "name": "GlobalAveragePooling3D", + "base": "GlobalPooling3D", + "docstring": "Global Average pooling operation for 3D data.\n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n keepdims: A boolean, whether to keep the spatial dimensions or not.\n If `keepdims` is `False` (default), the rank of the tensor is reduced\n for spatial dimensions.\n If `keepdims` is `True`, the spatial dimensions are retained with\n length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, 
spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n - If `keepdims`=False:\n 2D tensor with shape `(batch_size, channels)`.\n - If `keepdims`=True:\n - If `data_format='channels_last'`:\n 5D tensor with shape `(batch_size, 1, 1, 1, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape `(batch_size, channels, 1, 1, 1)`\n ", + "arguments": [ + { + "name": "self", + "default": null }, { - "name": "gamma_constraint", + "name": "data_format", "default": "None" }, { - "name": "trainable", - "default": "True", + "name": "keepdims", + "default": "False", "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null }, { - "name": "name", + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/pooling.py", + "aliases": [ + "GlobalAvgPool3D" + ] + }, + { + "name": "GlobalMaxPooling1D", + "base": "GlobalPooling1D", + "docstring": "Global max pooling operation for 1D temporal data.\n\n Downsamples the input representation by taking the maximum value over\n the time dimension.\n\n For example:\n\n >>> x = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])\n >>> x = tf.reshape(x, [3, 3, 1])\n >>> x\n \n >>> max_pool_1d = tf.keras.layers.GlobalMaxPooling1D()\n >>> max_pool_1d(x)\n \n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n keepdims: A boolean, whether to keep the temporal dimension or not.\n If `keepdims` is `False` (default), the rank of the tensor is reduced\n for spatial dimensions.\n If `keepdims` is `True`, the temporal dimension are retained with\n length 1.\n The behavior is the same as for `tf.reduce_max` or `np.max`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape:\n 
`(batch_size, steps, features)`\n - If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n\n Output shape:\n - If `keepdims`=False:\n 2D tensor with shape `(batch_size, features)`.\n - If `keepdims`=True:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, 1, features)`\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, 1)`\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "data_format", + "default": "channels_last" + }, + { + "name": "keepdims", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/pooling.py", + "aliases": [ + "GlobalMaxPool1D" + ] + }, + { + "name": "GlobalMaxPooling2D", + "base": "GlobalPooling2D", + "docstring": "Global max pooling operation for spatial data.\n\n Examples:\n\n >>> input_shape = (2, 4, 5, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.GlobalMaxPool2D()(x)\n >>> print(y.shape)\n (2, 3)\n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n keepdims: A boolean, whether to keep the spatial dimensions or not.\n If `keepdims` is `False` (default), the rank of the tensor is reduced\n for spatial dimensions.\n If `keepdims` is `True`, the spatial dimensions are retained with\n length 1.\n The behavior is the same as for `tf.reduce_max` or `np.max`.\n\n Input shape:\n - If 
`data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n - If `keepdims`=False:\n 2D tensor with shape `(batch_size, channels)`.\n - If `keepdims`=True:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, 1, 1, channels)`\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, 1, 1)`\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "data_format", "default": "None" + }, + { + "name": "keepdims", + "default": "False", + "type": "boolean" } ], "abstract": false, @@ -2684,21 +2684,28 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/normalization.py", - "aliases": [] + "file": "keras/layers/pooling.py", + "aliases": [ + "GlobalMaxPool2D" + ] }, { - "name": "LeakyReLU", - "base": "Layer", - "docstring": "Leaky version of a Rectified Linear Unit.\n\n It allows a small gradient when the unit is not active:\n\n ```\n f(x) = alpha * x if x < 0\n f(x) = x if x >= 0\n ```\n\n Usage:\n\n >>> layer = tf.keras.layers.LeakyReLU()\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [-0.9, -0.3, 0.0, 2.0]\n >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [-0.3, -0.1, 0.0, 2.0]\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Arguments:\n alpha: Float >= 0. Negative slope coefficient. 
Default to 0.3.\n\n ", + "name": "GlobalMaxPooling3D", + "base": "GlobalPooling3D", + "docstring": "Global Max pooling operation for 3D data.\n\n Args:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n keepdims: A boolean, whether to keep the spatial dimensions or not.\n If `keepdims` is `False` (default), the rank of the tensor is reduced\n for spatial dimensions.\n If `keepdims` is `True`, the spatial dimensions are retained with\n length 1.\n The behavior is the same as for `tf.reduce_max` or `np.max`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n - If `keepdims`=False:\n 2D tensor with shape `(batch_size, channels)`.\n - If `keepdims`=True:\n - If `data_format='channels_last'`:\n 5D tensor with shape `(batch_size, 1, 1, 1, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape `(batch_size, channels, 1, 1, 1)`\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "alpha", - "default": 0.3 + "name": "data_format", + "default": "None" + }, + { + "name": "keepdims", + "default": "False", + "type": "boolean" } ], "abstract": false, @@ -2713,41 +2720,132 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/advanced_activations.py", - "aliases": [] + "file": "keras/layers/pooling.py", + "aliases": 
[ + "GlobalMaxPool3D" + ] }, { - "name": "LocallyConnected1D", + "name": "Hashing", "base": "Layer", - "docstring": "Locally-connected layer for 1D inputs.\n\n The `LocallyConnected1D` layer works similarly to\n the `Conv1D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each different patch\n of the input.\n\n Note: layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n\n Example:\n ```python\n # apply a unshared weight convolution 1d of length 3 to a sequence with\n # 10 timesteps, with 64 output filters\n model = Sequential()\n model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))\n # now model.output_shape == (None, 8, 64)\n # add a new conv1d on top\n model.add(LocallyConnected1D(32, 3))\n # now model.output_shape == (None, 6, 32)\n ```\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: Currently only supports `\"valid\"` (case-insensitive).\n `\"same\"` may be supported in the future.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, length)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n implementation: implementation mode, either `1`, `2`, or `3`.\n `1` loops over input spatial locations to perform the forward pass.\n It is memory-efficient but performs a lot of (small) ops.\n\n `2` stores layer weights in a dense but sparsely-populated 2D matrix\n and implements the forward pass as a single matrix-multiply. It uses\n a lot of RAM but performs few (large) ops.\n\n `3` stores layer weights in a sparse tensor and implements the forward\n pass as a single sparse matrix-multiply.\n\n How to choose:\n\n `1`: large, dense models,\n `2`: small models,\n `3`: large, sparse models,\n\n where \"large\" stands for large input/output activations\n (i.e. many `filters`, `input_filters`, large `input_size`,\n `output_size`), and \"sparse\" stands for few connections between inputs\n and outputs, i.e. small ratio\n `filters * input_filters * kernel_size / (input_size * strides)`,\n where inputs to and outputs of the layer are assumed to have shapes\n `(input_size, input_filters)`, `(output_size, filters)`\n respectively.\n\n It is recommended to benchmark each in the setting of interest to pick\n the most efficient one (in terms of speed and memory usage). 
Correct\n choice of implementation can lead to dramatic speed improvements (e.g.\n 50X), potentially at the expense of RAM.\n\n Also, only `padding=\"valid\"` is supported by `implementation=1`.\n\n Input shape:\n 3D tensor with shape: `(batch_size, steps, input_dim)`\n\n Output shape:\n 3D tensor with shape: `(batch_size, new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n ", + "docstring": "Implements categorical feature hashing, also known as \"hashing trick\".\n\n This layer transforms single or multiple categorical inputs to hashed output.\n It converts a sequence of int or string to a sequence of int. The stable hash\n function uses `tensorflow::ops::Fingerprint` to produce the same output\n consistently across all platforms.\n\n This layer uses [FarmHash64](https://github.com/google/farmhash) by default,\n which provides a consistent hashed output across different platforms and is\n stable across invocations, regardless of device and context, by mixing the\n input bits thoroughly.\n\n If you want to obfuscate the hashed output, you can also pass a random `salt`\n argument in the constructor. 
In that case, the layer will use the\n [SipHash64](https://github.com/google/highwayhash) hash function, with\n the `salt` value serving as additional input to the hash function.\n\n **Example (FarmHash64)**\n\n >>> layer = tf.keras.layers.Hashing(num_bins=3)\n >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]\n >>> layer(inp)\n \n\n **Example (FarmHash64) with a mask value**\n\n >>> layer = tf.keras.layers.Hashing(num_bins=3, mask_value='')\n >>> inp = [['A'], ['B'], [''], ['C'], ['D']]\n >>> layer(inp)\n \n\n **Example (SipHash64)**\n\n >>> layer = tf.keras.layers.Hashing(num_bins=3, salt=[133, 137])\n >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]\n >>> layer(inp)\n \n\n **Example (Siphash64 with a single integer, same as `salt=[133, 133]`)**\n\n >>> layer = tf.keras.layers.Hashing(num_bins=3, salt=133)\n >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]\n >>> layer(inp)\n \n\n Args:\n num_bins: Number of hash bins. Note that this includes the `mask_value` bin,\n so the effective number of bins is `(num_bins - 1)` if `mask_value` is\n set.\n mask_value: A value that represents masked inputs, which are mapped to\n index 0. Defaults to None, meaning no mask term will be added and the\n hashing will start at index 0.\n salt: A single unsigned integer or None.\n If passed, the hash function used will be SipHash64, with these values\n used as an additional input (known as a \"salt\" in cryptography).\n These should be non-zero. Defaults to `None` (in that\n case, the FarmHash64 hash function is used). It also supports\n tuple/list of 2 unsigned integer numbers, see reference paper for details.\n **kwargs: Keyword arguments to construct a layer.\n\n Input shape:\n A single or list of string, int32 or int64 `Tensor`,\n `SparseTensor` or `RaggedTensor` of shape `(batch_size, ...,)`\n\n Output shape:\n An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape\n `(batch_size, ...)`. 
If any input is `RaggedTensor` then output is\n `RaggedTensor`, otherwise if any input is `SparseTensor` then output is\n `SparseTensor`, otherwise the output is `Tensor`.\n\n Reference:\n - [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)\n\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "filters", + "name": "num_bins", "default": null }, { - "name": "kernel_size", + "name": "mask_value", + "default": "None" + }, + { + "name": "salt", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", "default": null }, { - "name": "strides", + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/preprocessing/hashing.py", + "aliases": [] + }, + { + "name": "IntegerLookup", + "base": "IndexLookup", + "docstring": "Reindex integer inputs to be in a contiguous range, via a dict lookup.\n\n This layer maps a set of arbitrary integer input tokens into indexed\n integer output via a table-based vocabulary lookup. The layer's output indices\n will be contiguously arranged up to the maximum vocab size, even if the input\n tokens are non-continguous or unbounded. The layer supports multiple options\n for encoding the output via `output_mode`, and has optional support for\n out-of-vocabulary (OOV) tokens and masking.\n\n The vocabulary for the layer can be supplied on construction or learned via\n `adapt()`. During `adapt()`, the layer will analyze a data set, determine the\n frequency of individual integer tokens, and create a vocabulary from them. 
If\n the vocabulary is capped in size, the most frequent tokens will be used to\n create the vocabulary and all others will be treated as OOV.\n\n There are two possible output modes for the layer.\n When `output_mode` is `\"int\"`,\n input integers are converted to their index in the vocabulary (an integer).\n When `output_mode` is `\"multi_hot\"`, `\"count\"`, or `\"tf_idf\"`, input integers\n are encoded into an array where each dimension corresponds to an element in\n the vocabulary.\n\n The vocabulary can optionally contain a mask token as well as an OOV token\n (which can optionally occupy multiple indices in the vocabulary, as set\n by `num_oov_indices`).\n The position of these tokens in the vocabulary is fixed. When `output_mode` is\n `\"int\"`, the vocabulary will begin with the mask token at index 0, followed by\n OOV indices, followed by the rest of the vocabulary. When `output_mode` is\n `\"multi_hot\"`, `\"count\"`, or `\"tf_idf\"` the vocabulary will begin with OOV\n indices and instances of the mask token will be dropped.\n\n Args:\n max_tokens: The maximum size of the vocabulary for this layer. If None,\n there is no cap on the size of the vocabulary. Note that this size\n includes the OOV and mask tokens. Default to None.\n num_oov_indices: The number of out-of-vocabulary tokens to use. If this\n value is more than 1, OOV inputs are modulated to determine their OOV\n value. If this value is 0, OOV inputs will cause an error when calling the\n layer. Defaults to 1.\n mask_token: An integer token that represents masked inputs. When\n `output_mode` is `\"int\"`, the token is included in vocabulary and mapped\n to index 0. In other output modes, the token will not appear in the\n vocabulary and instances of the mask token in the input will be dropped.\n If set to None, no mask term will be added. Defaults to None.\n oov_token: Only used when `invert` is True. The token to return for OOV\n indices. Defaults to -1.\n vocabulary: Optional. 
Either an array of integers or a string path to a text\n file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D\n tensor containing the integer vocbulary terms. If passing a file path, the\n file should contain one line per term in the vocabulary. If this argument\n is set, there is no need to `adapt` the layer.\n invert: Only valid when `output_mode` is `\"int\"`. If True, this layer will\n map indices to vocabulary items instead of mapping vocabulary items to\n indices. Default to False.\n output_mode: Specification for the output of the layer. Defaults to `\"int\"`.\n Values can be `\"int\"`, `\"one_hot\"`, `\"multi_hot\"`, `\"count\"`, or\n `\"tf_idf\"` configuring the layer as follows:\n - `\"int\"`: Return the vocabulary indices of the input tokens.\n - `\"one_hot\"`: Encodes each individual element in the input into an\n array the same size as the vocabulary, containing a 1 at the element\n index. If the last dimension is size 1, will encode on that dimension.\n If the last dimension is not size 1, will append a new dimension for\n the encoded output.\n - `\"multi_hot\"`: Encodes each sample in the input into a single array\n the same size as the vocabulary, containing a 1 for each vocabulary\n term present in the sample. Treats the last dimension as the sample\n dimension, if input shape is (..., sample_length), output shape will\n be (..., num_tokens).\n - `\"count\"`: As `\"multi_hot\"`, but the int array contains a count of the\n number of times the token at that index appeared in the sample.\n - `\"tf_idf\"`: As `\"multi_hot\"`, but the TF-IDF algorithm is applied to\n find the value in each token slot.\n For `\"int\"` output, any shape of input and output is supported. For all\n other output modes, currently only output up to rank 2 is supported.\n pad_to_max_tokens: Only applicable when `output_mode` is `\"multi_hot\"`,\n `\"count\"`, or `\"tf_idf\"`. 
If True, the output will have its feature axis\n padded to `max_tokens` even if the number of unique tokens in the\n vocabulary is less than max_tokens, resulting in a tensor of shape\n [batch_size, max_tokens] regardless of vocabulary size. Defaults to False.\n sparse: Boolean. Only applicable when `output_mode` is `\"multi_hot\"`,\n `\"count\"`, or `\"tf_idf\"`. If True, returns a `SparseTensor` instead of a\n dense `Tensor`. Defaults to False.\n\n Examples:\n\n **Creating a lookup layer with a known vocabulary**\n\n This example creates a lookup layer with a pre-existing vocabulary.\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]]) # Note OOV tokens\n >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab)\n >>> layer(data)\n \n\n **Creating a lookup layer with an adapted vocabulary**\n\n This example creates a lookup layer and generates the vocabulary by analyzing\n the dataset.\n\n >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])\n >>> layer = tf.keras.layers.IntegerLookup()\n >>> layer.adapt(data)\n >>> layer.get_vocabulary()\n [-1, 42, 1138, 1000, 36, 12]\n\n Note that the OOV token -1 have been added to the vocabulary. 
The remaining\n tokens are sorted by frequency (42, which has 2 occurrences, is first) then\n by inverse sort order.\n\n >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])\n >>> layer = tf.keras.layers.IntegerLookup()\n >>> layer.adapt(data)\n >>> layer(data)\n \n\n\n **Lookups with multiple OOV indices**\n\n This example demonstrates how to use a lookup layer with multiple OOV indices.\n When a layer is created with more than one OOV index, any OOV tokens are\n hashed into the number of OOV buckets, distributing OOV tokens in a\n deterministic fashion across the set.\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([[12, 1138, 42], [37, 1000, 36]])\n >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab, num_oov_indices=2)\n >>> layer(data)\n \n\n Note that the output for OOV token 37 is 1, while the output for OOV token\n 1000 is 0. The in-vocab terms have their output index increased by 1 from\n earlier examples (12 maps to 2, etc) in order to make space for the extra OOV\n token.\n\n **One-hot output**\n\n Configure the layer with `output_mode='one_hot'`. Note that the first\n `num_oov_indices` dimensions in the ont_hot encoding represent OOV values.\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([12, 36, 1138, 42, 7]) # Note OOV tokens\n >>> layer = tf.keras.layers.IntegerLookup(\n ... vocabulary=vocab, output_mode='one_hot')\n >>> layer(data)\n \n\n **Multi-hot output**\n\n Configure the layer with `output_mode='multi_hot'`. Note that the first\n `num_oov_indices` dimensions in the multi_hot encoding represent OOV tokens\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens\n >>> layer = tf.keras.layers.IntegerLookup(\n ... vocabulary=vocab, output_mode='multi_hot')\n >>> layer(data)\n \n\n **Token count output**\n\n Configure the layer with `output_mode='count'`. 
As with multi_hot output, the\n first `num_oov_indices` dimensions in the output represent OOV tokens.\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens\n >>> layer = tf.keras.layers.IntegerLookup(\n ... vocabulary=vocab, output_mode='count')\n >>> layer(data)\n \n\n **TF-IDF output**\n\n Configure the layer with `output_mode='tf_idf'`. As with multi_hot output, the\n first `num_oov_indices` dimensions in the output represent OOV tokens.\n\n Each token bin will output `token_count * idf_weight`, where the idf weights\n are the inverse document frequency weights per token. These should be provided\n along with the vocabulary. Note that the `idf_weight` for OOV tokens will\n default to the average of all idf weights passed in.\n\n >>> vocab = [12, 36, 1138, 42]\n >>> idf_weights = [0.25, 0.75, 0.6, 0.4]\n >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens\n >>> layer = tf.keras.layers.IntegerLookup(output_mode='tf_idf')\n >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)\n >>> layer(data)\n \n\n To specify the idf weights for oov tokens, you will need to pass the entire\n vocabularly including the leading oov token.\n\n >>> vocab = [-1, 12, 36, 1138, 42]\n >>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]\n >>> data = tf.constant([[12, 1138, 42, 42], [42, 7, 36, 7]]) # Note OOV tokens\n >>> layer = tf.keras.layers.IntegerLookup(output_mode='tf_idf')\n >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)\n >>> layer(data)\n \n\n When adapting the layer in tf_idf mode, each input sample will be considered a\n document, and idf weight per token will be calculated as\n `log(1 + num_documents / (1 + token_document_count))`.\n\n **Inverse lookup**\n\n This example demonstrates how to map indices to tokens using this layer. 
(You\n can also use `adapt()` with `inverse=True`, but for simplicity we'll pass the\n vocab in this example.)\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([[1, 3, 4], [4, 0, 2]])\n >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab, invert=True)\n >>> layer(data)\n \n\n Note that the first index correspond to the oov token by default.\n\n\n **Forward and inverse lookup pairs**\n\n This example demonstrates how to use the vocabulary of a standard lookup\n layer to create an inverse lookup layer.\n\n >>> vocab = [12, 36, 1138, 42]\n >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])\n >>> layer = tf.keras.layers.IntegerLookup(vocabulary=vocab)\n >>> i_layer = tf.keras.layers.IntegerLookup(\n ... vocabulary=layer.get_vocabulary(), invert=True)\n >>> int_data = layer(data)\n >>> i_layer(int_data)\n \n\n In this example, the input token 1000 resulted in an output of -1, since\n 1000 was not in the vocabulary - it got represented as an OOV, and all OOV\n tokens are returned as -1 in the inverse layer. 
Also, note that for the\n inverse to work, you must have already set the forward layer vocabulary\n either directly or via `adapt()` before calling `get_vocabulary()`.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "max_tokens", + "default": "None" + }, + { + "name": "num_oov_indices", "default": 1 }, { - "name": "padding", - "default": "valid" + "name": "mask_token", + "default": "None" }, { - "name": "data_format", + "name": "oov_token", + "default": -1 + }, + { + "name": "vocabulary", "default": "None" }, + { + "name": "invert", + "default": "False", + "type": "boolean" + }, + { + "name": "output_mode", + "default": "int" + }, + { + "name": "sparse", + "default": "False", + "type": "boolean" + }, + { + "name": "pad_to_max_tokens", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/preprocessing/integer_lookup.py", + "aliases": [] + }, + { + "name": "LSTM", + "base": "DropoutRNNCellMixin", + "docstring": "Long Short-Term Memory layer - Hochreiter 1997.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Based on available runtime hardware and constraints, this layer\n will choose different implementations (cuDNN-based or pure-TensorFlow)\n to maximize the performance. If a GPU is available and all\n the arguments to the layer meet the requirement of the CuDNN kernel\n (see below for details), the layer will use a fast cuDNN implementation.\n\n The requirements to use the cuDNN implementation are:\n\n 1. `activation` == `tanh`\n 2. `recurrent_activation` == `sigmoid`\n 3. `recurrent_dropout` == 0\n 4. `unroll` is `False`\n 5. `use_bias` is `True`\n 6. Inputs, if use masking, are strictly right-padded.\n 7. 
Eager execution is enabled in the outermost context.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> lstm = tf.keras.layers.LSTM(4)\n >>> output = lstm(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)\n >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)\n >>> print(whole_seq_output.shape)\n (32, 10, 4)\n >>> print(final_memory_state.shape)\n (32, 4)\n >>> print(final_carry_state.shape)\n (32, 4)\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation\n is applied (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is\n applied (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs. Default: `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of\n the forget gate at initialization. Setting it to true will also force\n `bias_initializer=\"zeros\"`. This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. 
Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n return_sequences: Boolean. Whether to return the last output. in the output\n sequence, or the full sequence. Default: `False`.\n return_state: Boolean. Whether to return the last state in addition to the\n output. Default: `False`.\n go_backwards: Boolean (default `False`). If True, process the input sequence\n backwards and return the reversed sequence.\n stateful: Boolean (default `False`). If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `[timesteps, batch, feature]`, whereas in the False case, it will be\n `[batch, timesteps, feature]`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n unroll: Boolean (default `False`). If True, the network will be unrolled,\n else a symbolic loop will be used. Unrolling can speed-up a RNN, although\n it tends to be more memory-intensive. 
Unrolling is only suitable for short\n sequences.\n\n Call arguments:\n inputs: A 3D tensor with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[batch, timesteps]` indicating whether\n a given timestep should be masked (optional, defaults to `None`).\n An individual `True` entry indicates that the corresponding timestep\n should be utilized, while a `False` entry indicates that the corresponding\n timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or\n `recurrent_dropout` is used (optional, defaults to `None`).\n initial_state: List of initial state tensors to be passed to the first\n call of the cell (optional, defaults to `None` which causes creation\n of zero-filled initial state tensors).\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "units", + "default": null + }, { "name": "activation", - "default": "None" + "default": "tanh" + }, + { + "name": "recurrent_activation", + "default": "sigmoid" }, { "name": "use_bias", @@ -2755,36 +2853,968 @@ "type": "boolean" }, { - "name": "kernel_initializer", - "default": "glorot_uniform" + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "recurrent_initializer", + "default": "orthogonal" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "unit_forget_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "recurrent_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "recurrent_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + }, + { + "name": 
"dropout", + "default": 0.0 + }, + { + "name": "recurrent_dropout", + "default": 0.0 + }, + { + "name": "return_sequences", + "default": "False", + "type": "boolean" + }, + { + "name": "return_state", + "default": "False", + "type": "boolean" + }, + { + "name": "go_backwards", + "default": "False", + "type": "boolean" + }, + { + "name": "stateful", + "default": "False", + "type": "boolean" + }, + { + "name": "time_major", + "default": "False", + "type": "boolean" + }, + { + "name": "unroll", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + }, + { + "name": "mask", + "default": "None" + }, + { + "name": "training", + "default": "None" + }, + { + "name": "initial_state", + "default": "None" + } + ], + "file": "keras/layers/recurrent_v2.py", + "aliases": [] + }, + { + "name": "LSTMCell", + "base": "LSTMCell", + "docstring": "Cell class for the LSTM layer.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole time sequence input, whereas\n `tf.keras.layer.LSTM` processes the whole sequence.\n\n For example:\n\n >>> inputs = tf.random.normal([32, 10, 8])\n >>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4))\n >>> output = rnn(inputs)\n >>> print(output.shape)\n (32, 4)\n >>> rnn = tf.keras.layers.RNN(\n ... tf.keras.layers.LSTMCell(4),\n ... return_sequences=True,\n ... return_state=True)\n >>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs)\n >>> print(whole_seq_output.shape)\n (32, 10, 4)\n >>> print(final_memory_state.shape)\n (32, 4)\n >>> print(final_carry_state.shape)\n (32, 4)\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use. Default: hyperbolic tangent\n (`tanh`). If you pass `None`, no activation is applied (ie. 
\"linear\"\n activation: `a(x) = x`).\n recurrent_activation: Activation function to use for the recurrent step.\n Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs. Default: `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of\n the forget gate at initialization. Setting it to true will also force\n `bias_initializer=\"zeros\"`. This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: List of 2 tensors that corresponding to the cell's units. 
Both of\n them have shape `[batch, units]`, the first tensor is the memory state\n from previous time step, the second tensor is the carry state from\n previous time step. For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "units", + "default": null + }, + { + "name": "activation", + "default": "tanh" + }, + { + "name": "recurrent_activation", + "default": "sigmoid" + }, + { + "name": "use_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "recurrent_initializer", + "default": "orthogonal" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "unit_forget_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "recurrent_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "recurrent_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + }, + { + "name": "dropout", + "default": 0.0 + }, + { + "name": "recurrent_dropout", + "default": 0.0 + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + }, + { + "name": "states", + "default": null + }, + { + "name": "training", + "default": "None" + } + ], + "file": "keras/layers/recurrent_v2.py", + "aliases": [] + }, + { + "name": "Lambda", + "base": "Layer", + "docstring": "Wraps arbitrary expressions as a `Layer` object.\n\n The `Lambda` layer exists so that arbitrary expressions can be used\n as a `Layer` when 
constructing `Sequential`\n and Functional API models. `Lambda` layers are best suited for simple\n operations or quick experimentation. For more advanced use cases, follow\n [this guide](https://www.tensorflow.org/guide/keras/custom_layers_and_models)\n for subclassing `tf.keras.layers.Layer`.\n\n WARNING: `tf.keras.layers.Lambda` layers have (de)serialization limitations!\n\n The main reason to subclass `tf.keras.layers.Layer` instead of using a\n `Lambda` layer is saving and inspecting a Model. `Lambda` layers\n are saved by serializing the Python bytecode, which is fundamentally\n non-portable. They should only be loaded in the same environment where\n they were saved. Subclassed layers can be saved in a more portable way\n by overriding their `get_config` method. Models that rely on\n subclassed Layers are also often easier to visualize and reason about.\n\n Examples:\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n model.add(Lambda(antirectifier))\n ```\n\n Variables:\n While it is possible to use Variables with Lambda layers, this practice is\n discouraged as it can easily lead to bugs. 
For instance, consider the\n following layer:\n\n ```python\n scale = tf.Variable(1.)\n scale_layer = tf.keras.layers.Lambda(lambda x: x * scale)\n ```\n\n Because scale_layer does not directly track the `scale` variable, it will\n not appear in `scale_layer.trainable_weights` and will therefore not be\n trained if `scale_layer` is used in a Model.\n\n A better pattern is to write a subclassed Layer:\n\n ```python\n class ScaleLayer(tf.keras.layers.Layer):\n def __init__(self):\n super(ScaleLayer, self).__init__()\n self.scale = tf.Variable(1.)\n\n def call(self, inputs):\n return inputs * self.scale\n ```\n\n In general, Lambda layers can be convenient for simple stateless\n computation, but anything more complex should use a subclass Layer instead.\n\n Args:\n function: The function to be evaluated. Takes input tensor as first\n argument.\n output_shape: Expected output shape from function. This argument can be\n inferred if not explicitly provided. Can be a tuple or function. If a\n tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input: `output_shape =\n (input_shape[0], ) + output_shape` or, the input is `None` and\n the sample dimension is also `None`: `output_shape = (None, ) +\n output_shape` If a function, it specifies the entire shape as a function\n of the\n input shape: `output_shape = f(input_shape)`\n mask: Either None (indicating no masking) or a callable with the same\n signature as the `compute_mask` layer method, or a tensor that will be\n returned as output mask regardless of what the input is.\n arguments: Optional dictionary of keyword arguments to be passed to the\n function.\n\n Input shape:\n Arbitrary. 
Use the keyword argument input_shape (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n\n Output shape:\n Specified by `output_shape` argument\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "function", + "default": null + }, + { + "name": "output_shape", + "default": "None" + }, + { + "name": "mask", + "default": "None" + }, + { + "name": "arguments", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + }, + { + "name": "mask", + "default": "None" + }, + { + "name": "training", + "default": "None" + } + ], + "file": "keras/layers/core.py", + "aliases": [] + }, + { + "name": "LayerNormalization", + "base": "Layer", + "docstring": "Layer normalization layer (Ba et al., 2016).\n\n Normalize the activations of the previous layer for each given example in a\n batch independently, rather than across a batch like Batch Normalization.\n i.e. applies a transformation that maintains the mean activation within each\n example close to 0 and the activation standard deviation close to 1.\n\n Given a tensor `inputs`, moments are calculated and normalization\n is performed across the axes specified in `axis`.\n\n Example:\n\n >>> data = tf.constant(np.arange(10).reshape(5, 2) * 10, dtype=tf.float32)\n >>> print(data)\n tf.Tensor(\n [[ 0. 10.]\n [20. 30.]\n [40. 50.]\n [60. 70.]\n [80. 90.]], shape=(5, 2), dtype=float32)\n\n >>> layer = tf.keras.layers.LayerNormalization(axis=1)\n >>> output = layer(data)\n >>> print(output)\n tf.Tensor(\n [[-1. 1.]\n [-1. 1.]\n [-1. 1.]\n [-1. 1.]\n [-1. 
1.]], shape=(5, 2), dtype=float32)\n\n Notice that with Layer Normalization the normalization happens across the\n axes *within* each example, rather than across different examples in the\n batch.\n\n If `scale` or `center` are enabled, the layer will scale the normalized\n outputs by broadcasting them with a trainable variable `gamma`, and center\n the outputs by broadcasting with a trainable variable `beta`. `gamma` will\n default to a ones tensor and `beta` will default to a zeros tensor, so that\n centering and scaling are no-ops before training has begun.\n\n So, with scaling and centering enabled the normalization equations\n are as follows:\n\n Let the intermediate activations for a mini-batch to be the `inputs`.\n\n For each sample `x_i` in `inputs` with `k` features, we compute the mean and\n variance of the sample:\n\n ```python\n mean_i = sum(x_i[j] for j in range(k)) / k\n var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k\n ```\n\n and then compute a normalized `x_i_normalized`, including a small factor\n `epsilon` for numerical stability.\n\n ```python\n x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)\n ```\n\n And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,\n which are learned parameters:\n\n ```python\n output_i = x_i_normalized * gamma + beta\n ```\n\n `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and\n this part of the inputs' shape must be fully defined.\n\n For example:\n\n >>> layer = tf.keras.layers.LayerNormalization(axis=[1, 2, 3])\n >>> layer.build([5, 20, 30, 40])\n >>> print(layer.beta.shape)\n (20, 30, 40)\n >>> print(layer.gamma.shape)\n (20, 30, 40)\n\n Note that other implementations of layer normalization may choose to define\n `gamma` and `beta` over a separate set of axes from the axes being\n normalized across. For example, Group Normalization\n ([Wu et al. 
2018](https://arxiv.org/abs/1803.08494)) with group size of 1\n corresponds to a Layer Normalization that normalizes across height, width,\n and channel and has `gamma` and `beta` span only the channel dimension.\n So, this Layer Normalization implementation will not match a Group\n Normalization layer with group size set to 1.\n\n Args:\n axis: Integer or List/Tuple. The axis or axes to normalize across. Typically\n this is the features axis/axes. The left-out axes are typically the batch\n axis/axes. This argument defaults to `-1`, the last dimension in the\n input.\n epsilon: Small float added to variance to avoid dividing by zero. Defaults\n to 1e-3\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored. Defaults to True.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used. Defaults\n to True. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling will be done by the next layer.\n beta_initializer: Initializer for the beta weight. Defaults to zeros.\n gamma_initializer: Initializer for the gamma weight. Defaults to ones.\n beta_regularizer: Optional regularizer for the beta weight. None by default.\n gamma_regularizer: Optional regularizer for the gamma weight. None by\n default.\n beta_constraint: Optional constraint for the beta weight. None by default.\n gamma_constraint: Optional constraint for the gamma weight. None by default.\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape` (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n\n Output shape:\n Same shape as input.\n\n Reference:\n - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "axis", + "default": -1 + }, + { + "name": "epsilon", + "default": 0.001 + }, + { + "name": "center", + "default": "True", + "type": "boolean" + }, + { + "name": "scale", + "default": "True", + "type": "boolean" + }, + { + "name": "beta_initializer", + "default": "zeros" + }, + { + "name": "gamma_initializer", + "default": "ones" + }, + { + "name": "beta_regularizer", + "default": "None" + }, + { + "name": "gamma_regularizer", + "default": "None" + }, + { + "name": "beta_constraint", + "default": "None" + }, + { + "name": "gamma_constraint", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/normalization/layer_normalization.py", + "aliases": [] + }, + { + "name": "LeakyReLU", + "base": "Layer", + "docstring": "Leaky version of a Rectified Linear Unit.\n\n It allows a small gradient when the unit is not active:\n\n ```\n f(x) = alpha * x if x < 0\n f(x) = x if x >= 0\n ```\n\n Usage:\n\n >>> layer = tf.keras.layers.LeakyReLU()\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [-0.9, -0.3, 0.0, 2.0]\n >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [-0.3, -0.1, 0.0, 2.0]\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n alpha: Float >= 0. Negative slope coefficient. 
Default to 0.3.\n\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "alpha", + "default": 0.3 + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/advanced_activations.py", + "aliases": [] + }, + { + "name": "LocallyConnected1D", + "base": "Layer", + "docstring": "Locally-connected layer for 1D inputs.\n\n The `LocallyConnected1D` layer works similarly to\n the `Conv1D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each different patch\n of the input.\n\n Note: layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n\n Example:\n ```python\n # apply a unshared weight convolution 1d of length 3 to a sequence with\n # 10 timesteps, with 64 output filters\n model = Sequential()\n model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))\n # now model.output_shape == (None, 8, 64)\n # add a new conv1d on top\n model.add(LocallyConnected1D(32, 3))\n # now model.output_shape == (None, 6, 32)\n ```\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number\n of output filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer, specifying the\n length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer, specifying the\n stride length of the convolution.\n padding: Currently only supports `\"valid\"` (case-insensitive). `\"same\"`\n may be supported in the future. `\"valid\"` means no padding.\n data_format: A string, one of `channels_last` (default) or\n `channels_first`. The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape `(batch, length,\n channels)` while `channels_first` corresponds to inputs with shape\n `(batch, channels, length)`. 
It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`. If you\n never set it, then it will be \"channels_last\".\n activation: Activation function to use. If you don't specify anything, no\n activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\")..\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n implementation: implementation mode, either `1`, `2`, or `3`. `1` loops\n over input spatial locations to perform the forward pass. It is\n memory-efficient but performs a lot of (small) ops. `2` stores layer\n weights in a dense but sparsely-populated 2D matrix and implements the\n forward pass as a single matrix-multiply. It uses a lot of RAM but\n performs few (large) ops. `3` stores layer weights in a sparse tensor\n and implements the forward pass as a single sparse matrix-multiply.\n How to choose:\n `1`: large, dense models,\n `2`: small models,\n `3`: large, sparse models, where \"large\" stands for large\n input/output activations (i.e. many `filters`, `input_filters`,\n large `input_size`, `output_size`), and \"sparse\" stands for few\n connections between inputs and outputs, i.e. small ratio `filters *\n input_filters * kernel_size / (input_size * strides)`, where inputs\n to and outputs of the layer are assumed to have shapes `(input_size,\n input_filters)`, `(output_size, filters)` respectively. 
It is\n recommended to benchmark each in the setting of interest to pick the\n most efficient one (in terms of speed and memory usage). Correct\n choice of implementation can lead to dramatic speed improvements\n (e.g. 50X), potentially at the expense of RAM. Also, only\n `padding=\"valid\"` is supported by `implementation=1`.\n Input shape:\n 3D tensor with shape: `(batch_size, steps, input_dim)`\n Output shape:\n 3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value\n might have changed due to padding or strides.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "filters", + "default": null + }, + { + "name": "kernel_size", + "default": null + }, + { + "name": "strides", + "default": 1 + }, + { + "name": "padding", + "default": "valid" + }, + { + "name": "data_format", + "default": "None" + }, + { + "name": "activation", + "default": "None" + }, + { + "name": "use_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + }, + { + "name": "implementation", + "default": 1 + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/local.py", + "aliases": [] + }, + { + "name": "LocallyConnected2D", + "base": "Layer", + "docstring": "Locally-connected layer for 2D inputs.\n\n The `LocallyConnected2D` layer works similarly\n to the `Conv2D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each\n different patch of the input.\n\n Note: layer 
attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n\n Examples:\n ```python\n # apply a 3x3 unshared weights convolution with 64 output filters on a\n 32x32 image\n # with `data_format=\"channels_last\"`:\n model = Sequential()\n model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))\n # now model.output_shape == (None, 30, 30, 64)\n # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64\n parameters\n\n # add a 3x3 unshared weights convolution on top, with 32 output filters:\n model.add(LocallyConnected2D(32, (3, 3)))\n # now model.output_shape == (None, 28, 28, 32)\n ```\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number\n of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the width\n and height of the 2D convolution window. Can be a single integer to\n specify the same value for all spatial dimensions.\n strides: An integer or tuple/list of 2 integers, specifying the strides of\n the convolution along the width and height. Can be a single integer to\n specify the same value for all spatial dimensions.\n padding: Currently only support `\"valid\"` (case-insensitive). `\"same\"`\n will be supported in future. `\"valid\"` means no padding.\n data_format: A string, one of `channels_last` (default) or\n `channels_first`. The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape `(batch, height, width,\n channels)` while `channels_first` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n \"channels_last\".\n activation: Activation function to use. If you don't specify anything, no\n activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\").\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n implementation: implementation mode, either `1`, `2`, or `3`. `1` loops\n over input spatial locations to perform the forward pass. It is\n memory-efficient but performs a lot of (small) ops. `2` stores layer\n weights in a dense but sparsely-populated 2D matrix and implements the\n forward pass as a single matrix-multiply. It uses a lot of RAM but\n performs few (large) ops. `3` stores layer weights in a sparse tensor\n and implements the forward pass as a single sparse matrix-multiply.\n How to choose:\n `1`: large, dense models,\n `2`: small models,\n `3`: large, sparse models, where \"large\" stands for large\n input/output activations (i.e. many `filters`, `input_filters`,\n large `np.prod(input_size)`, `np.prod(output_size)`), and \"sparse\"\n stands for few connections between inputs and outputs, i.e. small\n ratio `filters * input_filters * np.prod(kernel_size) /\n (np.prod(input_size) * np.prod(strides))`, where inputs to and\n outputs of the layer are assumed to have shapes `input_size +\n (input_filters,)`, `output_size + (filters,)` respectively. It is\n recommended to benchmark each in the setting of interest to pick the\n most efficient one (in terms of speed and memory usage). Correct\n choice of implementation can lead to dramatic speed improvements\n (e.g. 50X), potentially at the expense of RAM. 
Also, only\n `padding=\"valid\"` is supported by `implementation=1`.\n Input shape:\n 4D tensor with shape: `(samples, channels, rows, cols)` if\n data_format='channels_first'\n or 4D tensor with shape: `(samples, rows, cols, channels)` if\n data_format='channels_last'.\n Output shape:\n 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if\n data_format='channels_first'\n or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if\n data_format='channels_last'. `rows` and `cols` values might have changed\n due to padding.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "filters", + "default": null + }, + { + "name": "kernel_size", + "default": null + }, + { + "name": "strides", + "default": [ + 1, + 1 + ] + }, + { + "name": "padding", + "default": "valid" + }, + { + "name": "data_format", + "default": "None" + }, + { + "name": "activation", + "default": "None" + }, + { + "name": "use_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + }, + { + "name": "implementation", + "default": 1 + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/local.py", + "aliases": [] + }, + { + "name": "Masking", + "base": "Layer", + "docstring": "Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be 
masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n Example:\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you\n lack data for these timesteps. You can:\n\n - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n samples, timesteps, features = 32, 10, 8\n inputs = np.random.random([samples, timesteps, features]).astype(np.float32)\n inputs[:, 3, :] = 0.\n inputs[:, 5, :] = 0.\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Masking(mask_value=0.,\n input_shape=(timesteps, features)))\n model.add(tf.keras.layers.LSTM(32))\n\n output = model(inputs)\n # The time step 3 and 5 will be skipped from LSTM calculation.\n ```\n\n See [the masking and padding guide](\n https://www.tensorflow.org/guide/keras/masking_and_padding)\n for more details.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "mask_value", + "default": 0.0 + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/core.py", + "aliases": [] + }, + { + "name": "MaxPooling1D", + "base": "Pooling1D", + "docstring": "Max pooling operation for 1D temporal data.\n\n Downsamples the input representation by taking the maximum value over a\n spatial window of size `pool_size`. The window is shifted by `strides`. 
The\n resulting output, when using the `\"valid\"` padding option, has a shape of:\n `output_shape = (input_shape - pool_size + 1) / strides)`\n\n The resulting output shape when using the `\"same\"` padding option is:\n `output_shape = input_shape / strides`\n\n For example, for `strides=1` and `padding=\"valid\"`:\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=1, padding='valid')\n >>> max_pool_1d(x)\n \n\n For example, for `strides=2` and `padding=\"valid\"`:\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=2, padding='valid')\n >>> max_pool_1d(x)\n \n\n For example, for `strides=1` and `padding=\"same\"`:\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=1, padding='same')\n >>> max_pool_1d(x)\n \n\n Args:\n pool_size: Integer, size of the max pooling window.\n strides: Integer, or None. Specifies how much the pooling window moves\n for each pooling step.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, steps)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "pool_size", + "default": 2 + }, + { + "name": "strides", + "default": "None" + }, + { + "name": "padding", + "default": "valid" + }, + { + "name": "data_format", + "default": "channels_last" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/pooling.py", + "aliases": [ + "MaxPool1D" + ] + }, + { + "name": "MaxPooling2D", + "base": "Pooling2D", + "docstring": "Max pooling operation for 2D spatial data.\n\n Downsamples the input along its spatial dimensions (height and width)\n by taking the maximum value over an input window\n (of size defined by `pool_size`) for each channel of the input.\n The window is shifted by `strides` along each dimension.\n\n The resulting output,\n when using the `\"valid\"` padding option, has a spatial shape\n (number of rows or columns) of:\n `output_shape = math.floor((input_shape - pool_size) / strides) + 1`\n (when `input_shape >= pool_size`)\n\n The 
resulting output shape when using the `\"same\"` padding option is:\n `output_shape = math.floor((input_shape - 1) / strides) + 1`\n\n For example, for `strides=(1, 1)` and `padding=\"valid\"`:\n\n >>> x = tf.constant([[1., 2., 3.],\n ... [4., 5., 6.],\n ... [7., 8., 9.]])\n >>> x = tf.reshape(x, [1, 3, 3, 1])\n >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... strides=(1, 1), padding='valid')\n >>> max_pool_2d(x)\n \n\n For example, for `strides=(2, 2)` and `padding=\"valid\"`:\n\n >>> x = tf.constant([[1., 2., 3., 4.],\n ... [5., 6., 7., 8.],\n ... [9., 10., 11., 12.]])\n >>> x = tf.reshape(x, [1, 3, 4, 1])\n >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... strides=(2, 2), padding='valid')\n >>> max_pool_2d(x)\n \n\n Usage Example:\n\n >>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]],\n ... [[2.], [2.], [3.], [2.]],\n ... [[4.], [1.], [1.], [1.]],\n ... [[2.], [2.], [1.], [4.]]]])\n >>> output = tf.constant([[[[1], [0]],\n ... [[0], [1]]]])\n >>> model = tf.keras.models.Sequential()\n >>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... input_shape=(4, 4, 1)))\n >>> model.compile('adam', 'mean_squared_error')\n >>> model.predict(input_image, steps=1)\n array([[[[2.],\n [4.]],\n [[4.],\n [4.]]]], dtype=float32)\n\n For example, for stride=(1, 1) and padding=\"same\":\n\n >>> x = tf.constant([[1., 2., 3.],\n ... [4., 5., 6.],\n ... [7., 8., 9.]])\n >>> x = tf.reshape(x, [1, 3, 3, 1])\n >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... strides=(1, 1), padding='same')\n >>> max_pool_2d(x)\n \n\n Args:\n pool_size: integer or tuple of 2 integers,\n window size over which to take the maximum.\n `(2, 2)` will take the max value over a 2x2 pooling window.\n If only one integer is specified, the same window length\n will be used for both dimensions.\n strides: Integer, tuple of 2 integers, or None.\n Strides values. Specifies how far the pooling window moves\n for each pooling step. 
If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.\n\n Returns:\n A tensor of rank 4 representing the maximum pooled values. 
See above for\n output shape.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "pool_size", + "default": [ + 2, + 2 + ] + }, + { + "name": "strides", + "default": "None" + }, + { + "name": "padding", + "default": "valid" + }, + { + "name": "data_format", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/pooling.py", + "aliases": [ + "MaxPool2D" + ] + }, + { + "name": "MaxPooling3D", + "base": "Pooling3D", + "docstring": "Max pooling operation for 3D data (spatial or spatio-temporal).\n\n Downsamples the input along its spatial dimensions (depth, height, and width)\n by taking the maximum value over an input window\n (of size defined by `pool_size`) for each channel of the input.\n The window is shifted by `strides` along each dimension.\n\n Args:\n pool_size: Tuple of 3 integers,\n factors by which to downscale (dim1, dim2, dim3).\n `(2, 2, 2)` will halve the size of the 3D input in each dimension.\n strides: tuple of 3 integers, or None. Strides values.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`\n\n Example:\n\n ```python\n depth = 30\n height = 30\n width = 30\n input_channels = 3\n\n inputs = tf.keras.Input(shape=(depth, height, width, input_channels))\n layer = tf.keras.layers.MaxPooling3D(pool_size=3)\n outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)\n ```\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "pool_size", + "default": [ + 2, + 2, + 2 + ] + }, + { + "name": "strides", + "default": "None" + }, + { + "name": "padding", + "default": "valid" + }, + { + "name": "data_format", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/pooling.py", + 
"aliases": [ + "MaxPool3D" + ] + }, + { + "name": "Maximum", + "base": "_Merge", + "docstring": "Layer that computes the maximum (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),\n ... np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> maxed = tf.keras.layers.Maximum()([x1, x2])\n >>> maxed.shape\n TensorShape([5, 8])\n ", + "arguments": [ + { + "name": "self", + "default": null + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/merge.py", + "aliases": [] + }, + { + "name": "Minimum", + "base": "_Merge", + "docstring": "Layer that computes the minimum (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1),\n ... 
np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> minned = tf.keras.layers.Minimum()([x1, x2])\n >>> minned.shape\n TensorShape([5, 8])\n ", + "arguments": [ + { + "name": "self", + "default": null + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/merge.py", + "aliases": [] + }, + { + "name": "MultiHeadAttention", + "base": "Layer", + "docstring": "MultiHeadAttention layer.\n\n This is an implementation of multi-headed attention as described in the paper\n \"Attention is all you Need\" (Vaswani et al., 2017).\n If `query`, `key,` `value` are the same, then\n this is self-attention. Each timestep in `query` attends to the\n corresponding sequence in `key`, and returns a fixed-width vector.\n\n This layer first projects `query`, `key` and `value`. These are\n (effectively) a list of tensors of length `num_attention_heads`, where the\n corresponding shapes are `(batch_size, , key_dim)`,\n `(batch_size, , key_dim)`,\n `(batch_size, , value_dim)`.\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor.\n\n Finally, the result tensor with the last dimension as value_dim can take an\n linear projection and return.\n\n Examples:\n\n Performs 1D cross-attention over two sequence inputs with an attention mask.\n Returns the additional attention weights over heads.\n\n >>> layer = MultiHeadAttention(num_heads=2, key_dim=2)\n >>> target = tf.keras.Input(shape=[8, 16])\n >>> source = tf.keras.Input(shape=[4, 16])\n >>> output_tensor, weights = layer(target, source,\n ... 
return_attention_scores=True)\n >>> print(output_tensor.shape)\n (None, 8, 16)\n >>> print(weights.shape)\n (None, 2, 8, 4)\n\n Performs 2D self-attention over a 5D input tensor on axes 2 and 3.\n\n >>> layer = MultiHeadAttention(num_heads=2, key_dim=2, attention_axes=(2, 3))\n >>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])\n >>> output_tensor = layer(input_tensor, input_tensor)\n >>> print(output_tensor.shape)\n (None, 5, 3, 4, 16)\n\n Args:\n num_heads: Number of attention heads.\n key_dim: Size of each attention head for query and key.\n value_dim: Size of each attention head for value.\n dropout: Dropout probability.\n use_bias: Boolean, whether the dense layers use bias vectors/matrices.\n output_shape: The expected shape of an output tensor, besides the batch and\n sequence dims. If not specified, projects back to the key feature dim.\n attention_axes: axes over which the attention is applied. `None` means\n attention over all axes, but batch, heads, and features.\n kernel_initializer: Initializer for dense layer kernels.\n bias_initializer: Initializer for dense layer biases.\n kernel_regularizer: Regularizer for dense layer kernels.\n bias_regularizer: Regularizer for dense layer biases.\n activity_regularizer: Regularizer for dense layer activity.\n kernel_constraint: Constraint for dense layer kernels.\n bias_constraint: Constraint for dense layer kernels.\n\n Call arguments:\n query: Query `Tensor` of shape `(B, T, dim)`.\n value: Value `Tensor` of shape `(B, S, dim)`.\n key: Optional key `Tensor` of shape `(B, S, dim)`. If not given, will use\n `value` for both `key` and `value`, which is the most common case.\n attention_mask: a boolean mask of shape `(B, T, S)`, that prevents\n attention to certain positions. The boolean mask specifies which query\n elements can attend to which key elements, 1 indicates attention and 0\n indicates no attention. 
Broadcasting can happen for the missing batch\n dimensions and the head dimension.\n return_attention_scores: A boolean to indicate whether the output should\n be attention output if True, or (attention_output, attention_scores) if\n False. Defaults to False.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n Defaults to either using the training mode of the parent layer/model,\n or False (inference) if there is no parent layer.\n\n Returns:\n attention_output: The result of the computation, of shape `(B, T, E)`,\n where `T` is for target sequence shapes and `E` is the query input last\n dimension if `output_shape` is `None`. Otherwise, the multi-head outputs\n are project to the shape specified by `output_shape`.\n attention_scores: [Optional] multi-head attention coeffients over\n attention axes.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "num_heads", + "default": null + }, + { + "name": "key_dim", + "default": null + }, + { + "name": "value_dim", + "default": "None" + }, + { + "name": "dropout", + "default": 0.0 + }, + { + "name": "use_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "output_shape", + "default": "None" + }, + { + "name": "attention_axes", + "default": "None" + }, + { + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "query", + "default": null + }, + { + "name": "value", + "default": null + }, + { + "name": "key", + 
"default": "None" + }, + { + "name": "attention_mask", + "default": "None" + }, + { + "name": "return_attention_scores", + "default": "False", + "type": "boolean" + }, + { + "name": "training", + "default": "None" + } + ], + "file": "keras/layers/multi_head_attention.py", + "aliases": [] + }, + { + "name": "Multiply", + "base": "_Merge", + "docstring": "Layer that multiplies (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1),\n ... np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> multiplied = tf.keras.layers.Multiply()([x1, x2])\n >>> multiplied.shape\n TensorShape([5, 8])\n ", + "arguments": [ + { + "name": "self", + "default": null + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/merge.py", + "aliases": [] + }, + { + "name": "Normalization", + "base": "PreprocessingLayer", + "docstring": "Feature-wise normalization of the data.\n\n This layer will coerce its inputs into a distribution centered around\n 0 with standard deviation 1. It accomplishes this by precomputing the mean and\n variance of the data, and calling `(input - mean) / sqrt(var)` at runtime.\n\n What happens in `adapt()`: Compute mean and variance of the data and store\n them as the layer's weights. `adapt()` should be called before `fit()`,\n `evaluate()`, or `predict()`.\n\n Args:\n axis: Integer, tuple of integers, or None. The axis or axes that should\n have a separate mean and variance for each index in the shape. For\n example, if shape is `(None, 5)` and `axis=1`, the layer will track 5\n separate mean and variance values for the last axis. 
If `axis` is set to\n `None`, the layer will normalize all elements in the input by a scalar\n mean and variance. Defaults to -1, where the last axis of the input is\n assumed to be a feature dimension and is normalized per index. Note that\n in the specific case of batched scalar inputs where the only axis is the\n batch axis, the default will normalize each index in the batch\n separately. In this case, consider passing `axis=None`.\n mean: The mean value(s) to use during normalization. The passed value(s)\n will be broadcast to the shape of the kept axes above; if the value(s)\n cannot be broadcast, an error will be raised when this layer's `build()`\n method is called.\n variance: The variance value(s) to use during normalization. The passed\n value(s) will be broadcast to the shape of the kept axes above; if the\n value(s) cannot be broadcast, an error will be raised when this layer's\n `build()` method is called.\n\n Examples:\n\n Calculate a global mean and variance by analyzing the dataset in `adapt()`.\n\n >>> adapt_data = np.array([1., 2., 3., 4., 5.], dtype='float32')\n >>> input_data = np.array([1., 2., 3.], dtype='float32')\n >>> layer = tf.keras.layers.Normalization(axis=None)\n >>> layer.adapt(adapt_data)\n >>> layer(input_data)\n \n\n Calculate a mean and variance for each index on the last axis.\n\n >>> adapt_data = np.array([[0., 7., 4.],\n ... [2., 9., 6.],\n ... [0., 7., 4.],\n ... 
[2., 9., 6.]], dtype='float32')\n >>> input_data = np.array([[0., 7., 4.]], dtype='float32')\n >>> layer = tf.keras.layers.Normalization(axis=-1)\n >>> layer.adapt(adapt_data)\n >>> layer(input_data)\n \n\n Pass the mean and variance directly.\n\n >>> input_data = np.array([[1.], [2.], [3.]], dtype='float32')\n >>> layer = tf.keras.layers.Normalization(mean=3., variance=2.)\n >>> layer(input_data)\n \n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "axis", + "default": -1 + }, + { + "name": "mean", + "default": "None" + }, + { + "name": "variance", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/preprocessing/normalization.py", + "aliases": [] + }, + { + "name": "PReLU", + "base": "Layer", + "docstring": "Parametric Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = alpha * x for x < 0\n f(x) = x for x >= 0\n ```\n\n where `alpha` is a learned array with the same shape as x.\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n alpha_initializer: Initializer function for the weights.\n alpha_regularizer: Regularizer for the weights.\n alpha_constraint: Constraint for the weights.\n shared_axes: The axes along which to share learnable\n parameters for the activation function.\n For example, if the incoming feature maps\n are from a 2D convolution\n with output shape `(batch, height, width, channels)`,\n and you wish to share parameters across space\n so that each filter only has one set of parameters,\n set `shared_axes=[1, 2]`.\n ", + "arguments": [ + { + "name": "self", + "default": null }, { - "name": "bias_initializer", + "name": "alpha_initializer", "default": "zeros" }, { - "name": "kernel_regularizer", + "name": "alpha_regularizer", "default": "None" }, { - "name": "bias_regularizer", + "name": "alpha_constraint", "default": "None" }, { - "name": "activity_regularizer", + "name": "shared_axes", "default": "None" - }, + } + ], + "abstract": false, + "outputs": [], + "inputs": [ { - "name": "kernel_constraint", - "default": "None" + "name": "self", + "default": null }, { - "name": "bias_constraint", - "default": "None" + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/advanced_activations.py", + "aliases": [] + }, + { + "name": "Permute", + "base": "Layer", + "docstring": "Permutes the dimensions of the input according to a given pattern.\n\n Useful e.g. connecting RNNs and convnets.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n Args:\n dims: Tuple of integers. Permutation pattern does not include the\n samples dimension. 
Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimensions\n of the input.\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n ", + "arguments": [ + { + "name": "self", + "default": null }, { - "name": "implementation", - "default": 1 + "name": "dims", + "default": null } ], "abstract": false, @@ -2799,81 +3829,92 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/local.py", + "file": "keras/layers/core.py", "aliases": [] }, { - "name": "LocallyConnected2D", + "name": "RNN", "base": "Layer", - "docstring": "Locally-connected layer for 2D inputs.\n\n The `LocallyConnected2D` layer works similarly\n to the `Conv2D` layer, except that weights are unshared,\n that is, a different set of filters is applied at each\n different patch of the input.\n\n Note: layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n\n Examples:\n ```python\n # apply a 3x3 unshared weights convolution with 64 output filters on a\n 32x32 image\n # with `data_format=\"channels_last\"`:\n model = Sequential()\n model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))\n # now model.output_shape == (None, 30, 30, 64)\n # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64\n parameters\n\n # add a 3x3 unshared weights convolution on top, with 32 output filters:\n model.add(LocallyConnected2D(32, (3, 3)))\n # now model.output_shape == (None, 28, 28, 32)\n ```\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n padding: Currently only support `\"valid\"` (case-insensitive).\n `\"same\"` will be supported in future.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n kernel_constraint: Constraint function applied to the kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n implementation: implementation mode, either `1`, `2`, or `3`.\n `1` loops over input spatial locations to perform the forward pass.\n It is memory-efficient but performs a lot of (small) ops.\n\n `2` stores layer weights in a dense but sparsely-populated 2D matrix\n and implements the forward pass as a single matrix-multiply. It uses\n a lot of RAM but performs few (large) ops.\n\n `3` stores layer weights in a sparse tensor and implements the forward\n pass as a single sparse matrix-multiply.\n\n How to choose:\n\n `1`: large, dense models,\n `2`: small models,\n `3`: large, sparse models,\n\n where \"large\" stands for large input/output activations\n (i.e. many `filters`, `input_filters`, large `np.prod(input_size)`,\n `np.prod(output_size)`), and \"sparse\" stands for few connections\n between inputs and outputs, i.e. small ratio\n `filters * input_filters * np.prod(kernel_size) / (np.prod(input_size)\n * np.prod(strides))`, where inputs to and outputs of the layer are\n assumed to have shapes `input_size + (input_filters,)`,\n `output_size + (filters,)` respectively.\n\n It is recommended to benchmark each in the setting of interest to pick\n the most efficient one (in terms of speed and memory usage). 
Correct\n choice of implementation can lead to dramatic speed improvements (e.g.\n 50X), potentially at the expense of RAM.\n\n Also, only `padding=\"valid\"` is supported by `implementation=1`.\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n ", + "docstring": "Base class for recurrent layers.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Args:\n cell: A RNN cell instance or a list of RNN cell instances.\n A RNN cell is a class that has:\n - A `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - A `state_size` attribute. This can be a single integer\n (single state) in which case it is the size of the recurrent\n state. This can also be a list/tuple of integers (one size per state).\n The `state_size` can also be TensorShape or tuple/list of\n TensorShape, to represent high dimension state.\n - A `output_size` attribute. This can be a single integer or a\n TensorShape, which represent the shape of the output. For backward\n compatible reason, if this attribute is not available for the\n cell, the value will be inferred by the first element of the\n `state_size`.\n - A `get_initial_state(inputs=None, batch_size=None, dtype=None)`\n method that creates a tensor meant to be fed to `call()` as the\n initial state, if the user didn't specify any initial state via other\n means. 
The returned initial state should have a shape of\n [batch_size, cell.state_size]. The cell might choose to create a\n tensor full of zeros, or full of other values based on the cell's\n implementation.\n `inputs` is the input tensor to the RNN layer, which should\n contain the batch size as its shape[0], and also dtype. Note that\n the shape[0] might be `None` during the graph construction. Either\n the `inputs` or the pair of `batch_size` and `dtype` are provided.\n `batch_size` is a scalar tensor that represents the batch size\n of the inputs. `dtype` is `tf.DType` that represents the dtype of\n the inputs.\n For backward compatibility, if this method is not implemented\n by the cell, the RNN layer will create a zero filled tensor with the\n size of [batch_size, cell.state_size].\n In the case that `cell` is a list of RNN cell instances, the cells\n will be stacked on top of each other in the RNN, resulting in an\n efficient stacked RNN.\n return_sequences: Boolean (default `False`). Whether to return the last\n output in the output sequence, or the full sequence.\n return_state: Boolean (default `False`). Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default `False`).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default `False`). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default `False`).\n If True, the network will be unrolled, else a symbolic loop will be used.\n Unrolling can speed-up a RNN, although it tends to be more\n memory-intensive. Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. 
Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n zero_output_for_mask: Boolean (default `False`).\n Whether the output should use zeros for the masked timesteps. Note that\n this field is only used when `return_sequences` is True and mask is\n provided. It can useful if you want to reuse the raw output sequence of\n the RNN without interference from the masked timesteps, eg, merging\n bidirectional RNNs.\n\n Call arguments:\n inputs: Input tensor.\n mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether\n a given timestep should be masked. An individual `True` entry indicates\n that the corresponding timestep should be utilized, while a `False`\n entry indicates that the corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is for use with cells that use dropout.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n constants: List of constant tensors to be passed to the cell at each\n timestep.\n\n Input shape:\n N-D tensor with shape `[batch_size, timesteps, ...]` or\n `[timesteps, batch_size, ...]` when time_major is True.\n\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is\n the output. 
The remaining tensors are the last states,\n each with shape `[batch_size, state_size]`, where `state_size` could\n be a high dimension tensor shape.\n - If `return_sequences`: N-D tensor with shape\n `[batch_size, timesteps, output_size]`, where `output_size` could\n be a high dimension tensor shape, or\n `[timesteps, batch_size, output_size]` when `time_major` is True.\n - Else, N-D tensor with shape `[batch_size, output_size]`, where\n `output_size` could be a high dimension tensor shape.\n\n Masking:\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an [tf.keras.layers.Embedding] layer with the `mask_zero` parameter\n set to `True`.\n\n Note on using statefulness in RNNs:\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - Specify `stateful=True` in the layer constructor.\n - Specify a fixed batch size for your model, by passing\n If sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n Else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n - Specify `shuffle=False` when calling `fit()`.\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n Note on specifying the initial state of RNNs:\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. 
The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n Note on passing external constants to RNNs:\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. an attention mechanism.\n\n Examples:\n\n ```python\n # First, let's define a RNN Cell, as a layer subclass.\n\n class MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = backend.dot(inputs, self.kernel)\n output = h + backend.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n # Let's use this cell in a RNN layer:\n\n cell = MinimalRNNCell(32)\n x = keras.Input((None, 5))\n layer = RNN(cell)\n y = layer(x)\n\n # Here's how to use the cell to build a stacked RNN:\n\n cells = [MinimalRNNCell(32), MinimalRNNCell(64)]\n x = keras.Input((None, 5))\n layer = RNN(cells)\n y = layer(x)\n ```\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "filters", + "name": "cell", "default": null }, { - 
"name": "kernel_size", - "default": null + "name": "return_sequences", + "default": "False", + "type": "boolean" }, { - "name": "strides", - "default": [ - 1, - 1 - ] + "name": "return_state", + "default": "False", + "type": "boolean" }, { - "name": "padding", - "default": "valid" + "name": "go_backwards", + "default": "False", + "type": "boolean" }, { - "name": "data_format", - "default": "None" + "name": "stateful", + "default": "False", + "type": "boolean" }, { - "name": "activation", - "default": "None" + "name": "unroll", + "default": "False", + "type": "boolean" }, { - "name": "use_bias", - "default": "True", + "name": "time_major", + "default": "False", "type": "boolean" - }, + } + ], + "abstract": false, + "outputs": [], + "inputs": [ { - "name": "kernel_initializer", - "default": "glorot_uniform" + "name": "self", + "default": null }, { - "name": "bias_initializer", - "default": "zeros" + "name": "inputs", + "default": null }, { - "name": "kernel_regularizer", + "name": "initial_state", "default": "None" }, { - "name": "bias_regularizer", + "name": "constants", "default": "None" - }, + } + ], + "file": "keras/layers/recurrent.py", + "aliases": [] + }, + { + "name": "RandomContrast", + "base": "Layer", + "docstring": "Adjust the contrast of an image or images by a random factor.\n\n Contrast is adjusted independently for each channel of each image during\n training.\n\n For each channel, this layer computes the mean of the image pixels in the\n channel and then adjusts each component `x` of each pixel to\n `(x - mean) * contrast_factor + mean`.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Attributes:\n factor: a positive float represented as fraction of value, or a tuple of\n size 2 representing lower and upper bound. 
When represented as a single\n float, lower = upper. The contrast factor will be randomly picked between\n `[1.0 - lower, 1.0 + upper]`.\n seed: Integer. Used to create a random seed.\n ", + "arguments": [ { - "name": "activity_regularizer", - "default": "None" + "name": "self", + "default": null }, { - "name": "kernel_constraint", - "default": "None" + "name": "factor", + "default": null }, { - "name": "bias_constraint", + "name": "seed", "default": "None" - }, - { - "name": "implementation", - "default": 1 } ], "abstract": false, @@ -2886,23 +3927,36 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/local.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { - "name": "Masking", + "name": "RandomCrop", "base": "Layer", - "docstring": "Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n Example:\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you\n lack data for these timesteps. 
You can:\n\n - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n samples, timesteps, features = 32, 10, 8\n inputs = np.random.random([samples, timesteps, features]).astype(np.float32)\n inputs[:, 3, :] = 0.\n inputs[:, 5, :] = 0.\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Masking(mask_value=0.,\n input_shape=(timesteps, features)))\n model.add(tf.keras.layers.LSTM(32))\n\n output = model(inputs)\n # The time step 3 and 5 will be skipped from LSTM calculation.\n ```\n\n See [the masking and padding guide](\n https://www.tensorflow.org/guide/keras/masking_and_padding)\n for more details.\n ", + "docstring": "Randomly crop the images to target height and width.\n\n This layer will crop all the images in the same batch to the same cropping\n location.\n By default, random cropping is only applied during training. At inference\n time, the images will be first rescaled to preserve the shorter side, and\n center cropped. If you need to apply random cropping at inference time,\n set `training` to True when calling the layer.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`.\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n seed: Integer. 
Used to create a random seed.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "mask_value", - "default": 0.0 + "name": "height", + "default": null + }, + { + "name": "width", + "default": null + }, + { + "name": "seed", + "default": "None" } ], "abstract": false, @@ -2915,35 +3969,32 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { - "name": "MaxPooling1D", - "base": "Pooling1D", - "docstring": "Max pooling operation for 1D temporal data.\n\n Downsamples the input representation by taking the maximum value over the\n window defined by `pool_size`. The window is shifted by `strides`. The\n resulting output when using \"valid\" padding option has a shape of:\n `output_shape = (input_shape - pool_size + 1) / strides)`\n\n The resulting output shape when using the \"same\" padding option is:\n `output_shape = input_shape / strides`\n\n For example, for strides=1 and padding=\"valid\":\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=1, padding='valid')\n >>> max_pool_1d(x)\n \n\n For example, for strides=2 and padding=\"valid\":\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=2, padding='valid')\n >>> max_pool_1d(x)\n \n\n For example, for strides=1 and padding=\"same\":\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=1, padding='same')\n >>> max_pool_1d(x)\n \n\n Arguments:\n pool_size: Integer, size of the max pooling window.\n strides: Integer, or None. 
Specifies how much the pooling window moves\n for each pooling step.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n \"valid\" adds no padding. \"same\" adds padding such that if the stride\n is 1, the output shape is the same as the input shape.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, steps)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.\n ", + "name": "RandomFlip", + "base": "Layer", + "docstring": "Randomly flip each image horizontally and vertically.\n\n This layer will flip the images based on the `mode` attribute.\n During inference time, the output will be identical to input. Call the layer\n with `training=True` to flip the input.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Attributes:\n mode: String indicating which flip mode to use. Can be `\"horizontal\"`,\n `\"vertical\"`, or `\"horizontal_and_vertical\"`. Defaults to\n `\"horizontal_and_vertical\"`. `\"horizontal\"` is a left-right flip and\n `\"vertical\"` is a top-bottom flip.\n seed: Integer. 
Used to create a random seed.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "pool_size", - "default": 2 + "name": "mode", + "default": "horizontal_and_vertical" }, { - "name": "strides", + "name": "seed", "default": "None" - }, - { - "name": "padding", - "default": "valid" - }, - { - "name": "data_format", - "default": "channels_last" } ], "abstract": false, @@ -2956,39 +4007,35 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "MaxPool1D" - ] + "file": "keras/layers/preprocessing/image_preprocessing.py", + "aliases": [] }, { - "name": "MaxPooling2D", - "base": "Pooling2D", - "docstring": "Max pooling operation for 2D spatial data.\n\n Downsamples the input representation by taking the maximum value over the\n window defined by `pool_size` for each dimension along the features axis.\n The window is shifted by `strides` in each dimension. The resulting output\n when using \"valid\" padding option has a shape(number of rows or columns) of:\n `output_shape = (input_shape - pool_size + 1) / strides)`\n\n The resulting output shape when using the \"same\" padding option is:\n `output_shape = input_shape / strides`\n\n For example, for stride=(1,1) and padding=\"valid\":\n\n >>> x = tf.constant([[1., 2., 3.],\n ... [4., 5., 6.],\n ... [7., 8., 9.]])\n >>> x = tf.reshape(x, [1, 3, 3, 1])\n >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... strides=(1, 1), padding='valid')\n >>> max_pool_2d(x)\n \n\n For example, for stride=(2,2) and padding=\"valid\":\n\n >>> x = tf.constant([[1., 2., 3., 4.],\n ... [5., 6., 7., 8.],\n ... [9., 10., 11., 12.]])\n >>> x = tf.reshape(x, [1, 3, 4, 1])\n >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... 
strides=(1, 1), padding='valid')\n >>> max_pool_2d(x)\n \n \n Usage Example:\n \n >>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]],\n ... [[2.], [2.], [3.], [2.]],\n ... [[4.], [1.], [1.], [1.]],\n ... [[2.], [2.], [1.], [4.]]]]) \n >>> output = tf.constant([[[[1], [0]],\n ... [[0], [1]]]]) \n >>> model = tf.keras.models.Sequential()\n >>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), \n ... input_shape=(4,4,1)))\n >>> model.compile('adam', 'mean_squared_error')\n >>> model.predict(input_image, steps=1)\n array([[[[2.],\n [4.]],\n [[4.],\n [4.]]]], dtype=float32)\n\n For example, for stride=(1,1) and padding=\"same\":\n\n >>> x = tf.constant([[1., 2., 3.],\n ... [4., 5., 6.],\n ... [7., 8., 9.]])\n >>> x = tf.reshape(x, [1, 3, 3, 1])\n >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\n ... strides=(1, 1), padding='same')\n >>> max_pool_2d(x)\n \n\n Arguments:\n pool_size: integer or tuple of 2 integers,\n window size over which to take the maximum.\n `(2, 2)` will take the max value over a 2x2 pooling window.\n If only one integer is specified, the same window length\n will be used for both dimensions.\n strides: Integer, tuple of 2 integers, or None.\n Strides values. Specifies how far the pooling window moves\n for each pooling step. If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n \"valid\" adds no zero padding. 
\"same\" adds padding such that if the stride\n is 1, the output shape is the same as input shape.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, rows, cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, rows, cols)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.\n - If `data_format='channels_first'`:\n 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.\n\n Returns:\n A tensor of rank 4 representing the maximum pooled values. See above for\n output shape.\n ", + "name": "RandomHeight", + "base": "Layer", + "docstring": "Randomly vary the height of a batch of images during training.\n\n Adjusts the height of a batch of images by a random factor. The input\n should be a 3D (unbatched) or 4D (batched) tensor in the `\"channels_last\"`\n image data format.\n\n By default, this layer is inactive during inference.\n\n Args:\n factor: A positive float (fraction of original height), or a tuple of size 2\n representing lower and upper bound for resizing vertically. When\n represented as a single float, this value is used for both the upper and\n lower bound. For instance, `factor=(0.2, 0.3)` results in an output with\n height changed by a random amount in the range `[20%, 30%]`.\n `factor=(-0.2, 0.3)` results in an output with height changed by a random\n amount in the range `[-20%, +30%]. 
`factor=0.2` results in an output with\n height changed by a random amount in the range `[-20%, +20%]`.\n interpolation: String, the interpolation method. Defaults to `\"bilinear\"`.\n Supports `\"bilinear\"`, `\"nearest\"`, `\"bicubic\"`, `\"area\"`,\n `\"lanczos3\"`, `\"lanczos5\"`, `\"gaussian\"`, `\"mitchellcubic\"`.\n seed: Integer. Used to create a random seed.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., random_height, width, channels)`.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "pool_size", - "default": [ - 2, - 2 - ] - }, - { - "name": "strides", - "default": "None" + "name": "factor", + "default": null }, { - "name": "padding", - "default": "valid" + "name": "interpolation", + "default": "bilinear" }, { - "name": "data_format", + "name": "seed", "default": "None" } ], @@ -3002,41 +4049,44 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "MaxPool2D" - ] + "file": "keras/layers/preprocessing/image_preprocessing.py", + "aliases": [] }, { - "name": "MaxPooling3D", - "base": "Pooling3D", - "docstring": "Max pooling operation for 3D data (spatial or spatio-temporal).\n\n Arguments:\n pool_size: Tuple of 3 integers,\n factors by which to downscale (dim1, dim2, dim3).\n `(2, 2, 2)` will halve the size of the 3D input in each dimension.\n strides: tuple of 3 integers, or None. 
Strides values.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n\n Output shape:\n - If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n - If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`\n ", + "name": "RandomRotation", + "base": "Layer", + "docstring": "Randomly rotate each image.\n\n By default, random rotations are only applied during training.\n At inference time, the layer does nothing. If you need to apply random\n rotations at inference time, set `training` to True when calling the layer.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format\n\n Attributes:\n factor: a float represented as fraction of 2 Pi, or a tuple of size 2\n representing lower and upper bound for rotating clockwise and\n counter-clockwise. 
A positive values means rotating counter clock-wise,\n while a negative value means clock-wise. When represented as a single\n float, this value is used for both the upper and lower bound. For\n instance, `factor=(-0.2, 0.3)` results in an output rotation by a random\n amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in an\n output rotating by a random amount in the range `[-20% * 2pi, 20% * 2pi]`.\n fill_mode: Points outside the boundaries of the input are filled according\n to the given mode (one of `{\"constant\", \"reflect\", \"wrap\", \"nearest\"}`).\n - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by\n reflecting about the edge of the last pixel.\n - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by\n filling all values beyond the edge with the same constant value k = 0.\n - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by\n wrapping around to the opposite edge.\n - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the\n nearest pixel.\n interpolation: Interpolation mode. Supported values: `\"nearest\"`,\n `\"bilinear\"`.\n seed: Integer. 
Used to create a random seed.\n fill_value: a float represents the value to be filled outside the boundaries\n when `fill_mode=\"constant\"`.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "pool_size", - "default": [ - 2, - 2, - 2 - ] + "name": "factor", + "default": null }, { - "name": "strides", - "default": "None" + "name": "fill_mode", + "default": "reflect" }, { - "name": "padding", - "default": "valid" + "name": "interpolation", + "default": "bilinear" }, { - "name": "data_format", + "name": "seed", "default": "None" + }, + { + "name": "fill_value", + "default": 0.0 } ], "abstract": false, @@ -3049,46 +4099,48 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/pooling.py", - "aliases": [ - "MaxPool3D" - ] + "file": "keras/layers/preprocessing/image_preprocessing.py", + "aliases": [] }, { - "name": "Maximum", - "base": "_Merge", - "docstring": "Layer that computes the maximum (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),\n ... np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> maxed = tf.keras.layers.Maximum()([x1, x2])\n >>> maxed.shape\n TensorShape([5, 8])\n ", + "name": "RandomTranslation", + "base": "Layer", + "docstring": "Randomly translate each image during training.\n\n Args:\n height_factor: a float represented as fraction of value, or a tuple of size\n 2 representing lower and upper bound for shifting vertically. A negative\n value means shifting image up, while a positive value means shifting image\n down. When represented as a single positive float, this value is used for\n both the upper and lower bound. 
For instance, `height_factor=(-0.2, 0.3)`\n results in an output shifted by a random amount in the range\n `[-20%, +30%]`.\n `height_factor=0.2` results in an output height shifted by a random amount\n in the range `[-20%, +20%]`.\n width_factor: a float represented as fraction of value, or a tuple of size 2\n representing lower and upper bound for shifting horizontally. A negative\n value means shifting image left, while a positive value means shifting\n image right. When represented as a single positive float, this value is\n used for both the upper and lower bound. For instance,\n `width_factor=(-0.2, 0.3)` results in an output shifted left by 20%, and\n shifted right by 30%. `width_factor=0.2` results in an output height\n shifted left or right by 20%.\n fill_mode: Points outside the boundaries of the input are filled according\n to the given mode (one of `{\"constant\", \"reflect\", \"wrap\", \"nearest\"}`).\n - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by\n reflecting about the edge of the last pixel.\n - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by\n filling all values beyond the edge with the same constant value k = 0.\n - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by\n wrapping around to the opposite edge.\n - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the\n nearest pixel.\n interpolation: Interpolation mode. Supported values: `\"nearest\"`,\n `\"bilinear\"`.\n seed: Integer. 
Used to create a random seed.\n fill_value: a float represents the value to be filled outside the boundaries\n when `fill_mode=\"constant\"`.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n ", "arguments": [ { "name": "self", "default": null - } - ], - "abstract": false, - "outputs": [], - "inputs": [ + }, { - "name": "self", + "name": "height_factor", "default": null }, { - "name": "inputs", + "name": "width_factor", "default": null - } - ], - "file": "tensorflow/python/keras/layers/merge.py", - "aliases": [] - }, - { - "name": "Minimum", - "base": "_Merge", - "docstring": "Layer that computes the minimum (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1),\n ... 
np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> minned = tf.keras.layers.Minimum()([x1, x2])\n >>> minned.shape\n TensorShape([5, 8])\n ", - "arguments": [ + }, { - "name": "self", - "default": null + "name": "fill_mode", + "default": "reflect" + }, + { + "name": "interpolation", + "default": "bilinear" + }, + { + "name": "seed", + "default": "None" + }, + { + "name": "fill_value", + "default": 0.0 } ], "abstract": false, @@ -3101,19 +4153,36 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { - "name": "Multiply", - "base": "_Merge", - "docstring": "Layer that multiplies (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1),\n ... np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> multiplied = tf.keras.layers.Multiply()([x1, x2])\n >>> multiplied.shape\n TensorShape([5, 8])\n ", + "name": "RandomWidth", + "base": "Layer", + "docstring": "Randomly vary the width of a batch of images during training.\n\n Adjusts the width of a batch of images by a random factor. The input\n should be a 3D (unbatched) or 4D (batched) tensor in the `\"channels_last\"`\n image data format.\n\n By default, this layer is inactive during inference.\n\n Args:\n factor: A positive float (fraction of original height), or a tuple of size 2\n representing lower and upper bound for resizing vertically. 
When\n represented as a single float, this value is used for both the upper and\n lower bound. For instance, `factor=(0.2, 0.3)` results in an output with\n width changed by a random amount in the range `[20%, 30%]`. `factor=(-0.2,\n 0.3)` results in an output with width changed by a random amount in the\n range `[-20%, +30%]`. `factor=0.2` results in an output with width changed\n by a random amount in the range `[-20%, +20%]`.\n interpolation: String, the interpolation method. Defaults to `bilinear`.\n Supports `\"bilinear\"`, `\"nearest\"`, `\"bicubic\"`, `\"area\"`, `\"lanczos3\"`,\n `\"lanczos5\"`, `\"gaussian\"`, `\"mitchellcubic\"`.\n seed: Integer. Used to create a random seed.\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., random_height, width, channels)`.\n ", "arguments": [ { "name": "self", "default": null + }, + { + "name": "factor", + "default": null + }, + { + "name": "interpolation", + "default": "bilinear" + }, + { + "name": "seed", + "default": "None" } ], "abstract": false, @@ -3126,35 +4195,48 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { - "name": "PReLU", + "name": "RandomZoom", "base": "Layer", - "docstring": "Parametric Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = alpha * x for x < 0\n f(x) = x for x >= 0\n ```\n\n where `alpha` is a learned array with the same shape as x.\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Arguments:\n alpha_initializer: Initializer function for the weights.\n alpha_regularizer: Regularizer for the weights.\n alpha_constraint: Constraint for the weights.\n shared_axes: The axes along which to share learnable\n parameters for the activation function.\n For example, if the incoming feature maps\n are from a 2D convolution\n with output shape `(batch, height, width, channels)`,\n and you wish to share parameters across space\n so that each filter only has one set of parameters,\n set `shared_axes=[1, 2]`.\n ", + "docstring": "Randomly zoom each image during training.\n\n Args:\n height_factor: a float represented as fraction of value, or a tuple of size\n 2 representing lower and upper bound for zooming vertically. When\n represented as a single float, this value is used for both the upper and\n lower bound. A positive value means zooming out, while a negative value\n means zooming in. For instance, `height_factor=(0.2, 0.3)` result in an\n output zoomed out by a random amount in the range `[+20%, +30%]`.\n `height_factor=(-0.3, -0.2)` result in an output zoomed in by a random\n amount in the range `[+20%, +30%]`.\n width_factor: a float represented as fraction of value, or a tuple of size 2\n representing lower and upper bound for zooming horizontally. When\n represented as a single float, this value is used for both the upper and\n lower bound. For instance, `width_factor=(0.2, 0.3)` result in an output\n zooming out between 20% to 30%. `width_factor=(-0.3, -0.2)` result in an\n output zooming in between 20% to 30%. 
Defaults to `None`, i.e., zooming\n vertical and horizontal directions by preserving the aspect ratio.\n fill_mode: Points outside the boundaries of the input are filled according\n to the given mode (one of `{\"constant\", \"reflect\", \"wrap\", \"nearest\"}`).\n - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by\n reflecting about the edge of the last pixel.\n - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by\n filling all values beyond the edge with the same constant value k = 0.\n - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by\n wrapping around to the opposite edge.\n - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the\n nearest pixel.\n interpolation: Interpolation mode. Supported values: `\"nearest\"`,\n `\"bilinear\"`.\n seed: Integer. Used to create a random seed.\n fill_value: a float represents the value to be filled outside the boundaries\n when `fill_mode=\"constant\"`.\n\n Example:\n\n >>> input_img = np.random.random((32, 224, 224, 3))\n >>> layer = tf.keras.layers.RandomZoom(.5, .2)\n >>> out_img = layer(input_img)\n >>> out_img.shape\n TensorShape([32, 224, 224, 3])\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "alpha_initializer", - "default": "zeros" + "name": "height_factor", + "default": null }, { - "name": "alpha_regularizer", + "name": "width_factor", "default": "None" }, { - "name": "alpha_constraint", - "default": "None" + "name": "fill_mode", + "default": "reflect" }, { - "name": "shared_axes", + "name": "interpolation", + "default": "bilinear" + }, + { + "name": "seed", "default": "None" + }, + { + "name": "fill_value", + "default": 0.0 } ], "abstract": false, @@ 
-3167,23 +4249,36 @@ { "name": "inputs", "default": null + }, + { + "name": "training", + "default": "True", + "type": "boolean" } ], - "file": "tensorflow/python/keras/layers/advanced_activations.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { - "name": "Permute", + "name": "ReLU", "base": "Layer", - "docstring": "Permutes the dimensions of the input according to a given pattern.\n\n Useful e.g. connecting RNNs and convnets.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n Arguments:\n dims: Tuple of integers. Permutation pattern does not include the\n samples dimension. Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimensions\n of the input.\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n ", + "docstring": "Rectified Linear Unit activation function.\n\n With default values, it returns element-wise `max(x, 0)`.\n\n Otherwise, it follows:\n\n ```\n f(x) = max_value if x >= max_value\n f(x) = x if threshold <= x < max_value\n f(x) = negative_slope * (x - threshold) otherwise\n ```\n\n Usage:\n\n >>> layer = tf.keras.layers.ReLU()\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n >>> layer = tf.keras.layers.ReLU(max_value=1.0)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 1.0]\n >>> layer = tf.keras.layers.ReLU(negative_slope=1.0)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [-3.0, -1.0, 0.0, 2.0]\n >>> layer = tf.keras.layers.ReLU(threshold=1.5)\n >>> output = layer([-3.0, -1.0, 1.0, 
2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n max_value: Float >= 0. Maximum activation value. Default to None, which\n means unlimited.\n negative_slope: Float >= 0. Negative slope coefficient. Default to 0.\n threshold: Float >= 0. Threshold value for thresholded activation. Default\n to 0.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "dims", - "default": null + "name": "max_value", + "default": "None" + }, + { + "name": "negative_slope", + "default": 0 + }, + { + "name": "threshold", + "default": 0 } ], "abstract": false, @@ -3198,51 +4293,21 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/advanced_activations.py", "aliases": [] }, { - "name": "RNN", + "name": "RepeatVector", "base": "Layer", - "docstring": "Base class for recurrent layers.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Arguments:\n cell: A RNN cell instance or a list of RNN cell instances.\n A RNN cell is a class that has:\n - A `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - A `state_size` attribute. This can be a single integer\n (single state) in which case it is the size of the recurrent\n state. This can also be a list/tuple of integers (one size per state).\n The `state_size` can also be TensorShape or tuple/list of\n TensorShape, to represent high dimension state.\n - A `output_size` attribute. This can be a single integer or a\n TensorShape, which represent the shape of the output. 
For backward\n compatible reason, if this attribute is not available for the\n cell, the value will be inferred by the first element of the\n `state_size`.\n - A `get_initial_state(inputs=None, batch_size=None, dtype=None)`\n method that creates a tensor meant to be fed to `call()` as the\n initial state, if the user didn't specify any initial state via other\n means. The returned initial state should have a shape of\n [batch_size, cell.state_size]. The cell might choose to create a\n tensor full of zeros, or full of other values based on the cell's\n implementation.\n `inputs` is the input tensor to the RNN layer, which should\n contain the batch size as its shape[0], and also dtype. Note that\n the shape[0] might be `None` during the graph construction. Either\n the `inputs` or the pair of `batch_size` and `dtype` are provided.\n `batch_size` is a scalar tensor that represents the batch size\n of the inputs. `dtype` is `tf.DType` that represents the dtype of\n the inputs.\n For backward compatible reason, if this method is not implemented\n by the cell, the RNN layer will create a zero filled tensor with the\n size of [batch_size, cell.state_size].\n In the case that `cell` is a list of RNN cell instances, the cells\n will be stacked on top of each other in the RNN, resulting in an\n efficient stacked RNN.\n return_sequences: Boolean (default `False`). Whether to return the last\n output in the output sequence, or the full sequence.\n return_state: Boolean (default `False`). Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default `False`).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default `False`). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default `False`).\n If True, the network will be unrolled, else a symbolic loop will be used.\n Unrolling can speed-up a RNN, although it tends to be more\n memory-intensive. Unrolling is only suitable for short sequences.\n time_major: The shape format of the `inputs` and `outputs` tensors.\n If True, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n zero_output_for_mask: Boolean (default `False`).\n Whether the output should use zeros for the masked timesteps. Note that\n this field is only used when `return_sequences` is True and mask is\n provided. It can useful if you want to reuse the raw output sequence of\n the RNN without interference from the masked timesteps, eg, merging\n bidirectional RNNs.\n\n Call arguments:\n inputs: Input tensor.\n mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether\n a given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is for use with cells that use dropout.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n constants: List of constant tensors to be passed to the cell at each\n timestep.\n\n Input shape:\n N-D tensor with shape `[batch_size, timesteps, ...]` or\n `[timesteps, batch_size, ...]` when time_major is True.\n\n Output shape:\n - If `return_state`: a list of tensors. The first tensor is\n the output. 
The remaining tensors are the last states,\n each with shape `[batch_size, state_size]`, where `state_size` could\n be a high dimension tensor shape.\n - If `return_sequences`: N-D tensor with shape\n `[batch_size, timesteps, output_size]`, where `output_size` could\n be a high dimension tensor shape, or\n `[timesteps, batch_size, output_size]` when `time_major` is True.\n - Else, N-D tensor with shape `[batch_size, output_size]`, where\n `output_size` could be a high dimension tensor shape.\n\n Masking:\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an [tf.keras.layers.Embedding] layer with the `mask_zero` parameter\n set to `True`.\n\n Note on using statefulness in RNNs:\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - Specify `stateful=True` in the layer constructor.\n - Specify a fixed batch size for your model, by passing\n If sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n Else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n - Specify `shuffle=False` when calling fit().\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n Note on specifying the initial state of RNNs:\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. 
The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n Note on passing external constants to RNNs:\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. an attention mechanism.\n\n Examples:\n\n ```python\n # First, let's define a RNN Cell, as a layer subclass.\n\n class MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = K.dot(inputs, self.kernel)\n output = h + K.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n # Let's use this cell in a RNN layer:\n\n cell = MinimalRNNCell(32)\n x = keras.Input((None, 5))\n layer = RNN(cell)\n y = layer(x)\n\n # Here's how to use the cell to build a stacked RNN:\n\n cells = [MinimalRNNCell(32), MinimalRNNCell(64)]\n x = keras.Input((None, 5))\n layer = RNN(cells)\n y = layer(x)\n ```\n ", + "docstring": "Repeats the input n times.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n 
# now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n Args:\n n: Integer, repetition factor.\n\n Input shape:\n 2D tensor of shape `(num_samples, features)`.\n\n Output shape:\n 3D tensor of shape `(num_samples, n, features)`.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "cell", + "name": "n", "default": null - }, - { - "name": "return_sequences", - "default": "False", - "type": "boolean" - }, - { - "name": "return_state", - "default": "False", - "type": "boolean" - }, - { - "name": "go_backwards", - "default": "False", - "type": "boolean" - }, - { - "name": "stateful", - "default": "False", - "type": "boolean" - }, - { - "name": "unroll", - "default": "False", - "type": "boolean" - }, - { - "name": "time_major", - "default": "False", - "type": "boolean" } ], "abstract": false, @@ -3255,41 +4320,27 @@ { "name": "inputs", "default": null - }, - { - "name": "initial_state", - "default": "None" - }, - { - "name": "constants", - "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent.py", - "aliases": [ - "RNN" - ] + "file": "keras/layers/core.py", + "aliases": [] }, { - "name": "ReLU", + "name": "Rescaling", "base": "Layer", - "docstring": "Rectified Linear Unit activation function.\n\n With default values, it returns element-wise `max(x, 0)`.\n\n Otherwise, it follows:\n\n ```\n f(x) = max_value if x >= max_value\n f(x) = x if threshold <= x < max_value\n f(x) = negative_slope * (x - threshold) otherwise\n ```\n\n Usage:\n\n >>> layer = tf.keras.layers.ReLU()\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n >>> layer = tf.keras.layers.ReLU(max_value=1.0)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 1.0]\n >>> layer = tf.keras.layers.ReLU(negative_slope=1.0)\n >>> output = layer([-3.0, -1.0, 0.0, 2.0])\n >>> 
list(output.numpy())\n [-3.0, -1.0, 0.0, 2.0]\n >>> layer = tf.keras.layers.ReLU(threshold=1.5)\n >>> output = layer([-3.0, -1.0, 1.0, 2.0])\n >>> list(output.numpy())\n [0.0, 0.0, 0.0, 2.0]\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Arguments:\n max_value: Float >= 0. Maximum activation value. Default to None, which\n means unlimited.\n negative_slope: Float >= 0. Negative slope coefficient. Default to 0.\n threshold: Float. Threshold value for thresholded activation. Default to 0.\n ", + "docstring": "Multiply inputs by `scale` and adds `offset`.\n\n For instance:\n\n 1. To rescale an input in the `[0, 255]` range\n to be in the `[0, 1]` range, you would pass `scale=1./255`.\n\n 2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,\n you would pass `scale=1./127.5, offset=-1`.\n\n The rescaling is applied both during training and inference.\n\n Input shape:\n Arbitrary.\n\n Output shape:\n Same as input.\n\n Args:\n scale: Float, the scale to apply to the inputs.\n offset: Float, the offset to apply to the inputs.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "max_value", - "default": "None" - }, - { - "name": "negative_slope", - "default": 0 + "name": "scale", + "default": null }, { - "name": "threshold", - "default": 0 + "name": "offset", + "default": 0.0 } ], "abstract": false, @@ -3304,20 +4355,20 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/advanced_activations.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { - "name": "RepeatVector", + "name": "Reshape", "base": "Layer", - "docstring": "Repeats the input n times.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch 
dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n Arguments:\n n: Integer, repetition factor.\n\n Input shape:\n 2D tensor of shape `(num_samples, features)`.\n\n Output shape:\n 3D tensor of shape `(num_samples, n, features)`.\n ", + "docstring": "Layer that reshapes inputs into the given shape.\n\n Input shape:\n Arbitrary, although all dimensions in the input shape must be known/fixed.\n Use the keyword argument `input_shape` (tuple of integers, does not include\n the samples/batch size axis) when using this layer as the first layer\n in a model.\n\n Output shape:\n `(batch_size,) + target_shape`\n\n Example:\n\n >>> # as first layer in a Sequential model\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Reshape((3, 4), input_shape=(12,)))\n >>> # model.output_shape == (None, 3, 4), `None` is the batch size.\n >>> model.output_shape\n (None, 3, 4)\n\n >>> # as intermediate layer in a Sequential model\n >>> model.add(tf.keras.layers.Reshape((6, 2)))\n >>> model.output_shape\n (None, 6, 2)\n\n >>> # also supports shape inference using `-1` as dimension\n >>> model.add(tf.keras.layers.Reshape((-1, 2, 2)))\n >>> model.output_shape\n (None, 3, 2, 2)\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "n", + "name": "target_shape", "default": null } ], @@ -3333,21 +4384,34 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/core.py", "aliases": [] }, { - "name": "Reshape", + "name": "Resizing", "base": "Layer", - "docstring": "Layer that reshapes inputs into the given shape.\n\n Input shape:\n Arbitrary, although all dimensions in the input shape must be known/fixed.\n Use the keyword argument `input_shape` (tuple of integers, does not include\n the samples/batch size axis) when using this layer as the first layer\n in a model.\n\n Output shape:\n `(batch_size,) + target_shape`\n\n Example:\n\n >>> # as first layer in a Sequential model\n 
>>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Reshape((3, 4), input_shape=(12,)))\n >>> # model.output_shape == (None, 3, 4), `None` is the batch size.\n >>> model.output_shape\n (None, 3, 4)\n\n >>> # as intermediate layer in a Sequential model\n >>> model.add(tf.keras.layers.Reshape((6, 2)))\n >>> model.output_shape\n (None, 6, 2)\n\n >>> # also supports shape inference using `-1` as dimension\n >>> model.add(tf.keras.layers.Reshape((-1, 2, 2)))\n >>> model.output_shape\n (None, 3, 2, 2)\n ", + "docstring": "Image resizing layer.\n\n Resize the batched image input to target height and width. The input should\n be a 4D (batched) or 3D (unbatched) tensor in `\"channels_last\"` format.\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n interpolation: String, the interpolation method. Defaults to `\"bilinear\"`.\n Supports `\"bilinear\"`, `\"nearest\"`, `\"bicubic\"`, `\"area\"`, `\"lanczos3\"`,\n `\"lanczos5\"`, `\"gaussian\"`, `\"mitchellcubic\"`.\n crop_to_aspect_ratio: If True, resize the images without aspect\n ratio distortion. When the original aspect ratio differs from the target\n aspect ratio, the output image will be cropped so as to return the largest\n possible window in the image (of size `(height, width)`) that matches\n the target aspect ratio. 
By default (`crop_to_aspect_ratio=False`),\n aspect ratio may not be preserved.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "target_shape", + "name": "height", + "default": null + }, + { + "name": "width", "default": null + }, + { + "name": "interpolation", + "default": "bilinear" + }, + { + "name": "crop_to_aspect_ratio", + "default": "False", + "type": "boolean" } ], "abstract": false, @@ -3362,13 +4426,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/preprocessing/image_preprocessing.py", "aliases": [] }, { "name": "SeparableConv1D", "base": "SeparableConv", - "docstring": "Depthwise separable 1D convolution.\n\n This layer performs a depthwise convolution that acts separately on\n channels, followed by a pointwise convolution that mixes channels.\n If `use_bias` is True and a bias initializer is provided,\n it adds a bias vector to the output.\n It then optionally applies an activation function to produce the final output.\n\n Arguments:\n filters: Integer, the dimensionality of the output space (i.e. 
the number\n of filters in the convolution).\n kernel_size: A single integer specifying the spatial\n dimensions of the filters.\n strides: A single integer specifying the strides\n of the convolution.\n Specifying any `stride` value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: A single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias.\n depthwise_initializer: An initializer for the depthwise convolution kernel (\n see `keras.initializers`).\n pointwise_initializer: An initializer for the pointwise convolution kernel (\n see `keras.initializers`).\n bias_initializer: An initializer for the bias vector. 
If None, the default\n initializer will be used (see `keras.initializers`).\n depthwise_regularizer: Optional regularizer for the depthwise\n convolution kernel (see `keras.regularizers`).\n pointwise_regularizer: Optional regularizer for the pointwise\n convolution kernel (see `keras.regularizers`).\n bias_regularizer: Optional regularizer for the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Optional regularizer function for the output (\n see `keras.regularizers`).\n depthwise_constraint: Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used for\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are\n not safe to use when doing asynchronous distributed training (\n see `keras.constraints`).\n pointwise_constraint: Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer` (\n see `keras.constraints`).\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer` (\n see `keras.constraints`).\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n name: A string, the name of the layer.\n\n Input shape:\n 3D tensor with shape:\n `(batch_size, channels, steps)` if data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, steps, channels)` if data_format='channels_last'.\n\n Output shape:\n 3D tensor with shape:\n `(batch_size, filters, new_steps)` if data_format='channels_first'\n or 3D tensor with shape:\n `(batch_size, new_steps, filters)` if data_format='channels_last'.\n `new_steps` value might have changed due to padding or strides.\n\n Returns:\n A tensor of rank 3 representing\n `activation(separableconv1d(inputs, kernel) + bias)`.\n\n 
Raises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n ", + "docstring": "Depthwise separable 1D convolution.\n\n This layer performs a depthwise convolution that acts separately on\n channels, followed by a pointwise convolution that mixes channels.\n If `use_bias` is True and a bias initializer is provided,\n it adds a bias vector to the output.\n It then optionally applies an activation function to produce the final output.\n\n Args:\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: A single integer specifying the spatial\n dimensions of the filters.\n strides: A single integer specifying the strides\n of the convolution.\n Specifying any `stride` value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input. `\"causal\"` results in causal\n (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: A single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. 
The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias.\n depthwise_initializer: An initializer for the depthwise convolution kernel (\n see `keras.initializers`). If None, then the default initializer (\n 'glorot_uniform') will be used.\n pointwise_initializer: An initializer for the pointwise convolution kernel (\n see `keras.initializers`). If None, then the default initializer \n ('glorot_uniform') will be used.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer ('zeros') will be used (see `keras.initializers`).\n depthwise_regularizer: Optional regularizer for the depthwise\n convolution kernel (see `keras.regularizers`).\n pointwise_regularizer: Optional regularizer for the pointwise\n convolution kernel (see `keras.regularizers`).\n bias_regularizer: Optional regularizer for the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Optional regularizer function for the output (\n see `keras.regularizers`).\n depthwise_constraint: Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used for\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training (\n see `keras.constraints`).\n pointwise_constraint: Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer` (\n see `keras.constraints`).\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer` (\n see `keras.constraints`).\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n\n Input shape:\n 3D tensor with shape:\n `(batch_size, channels, steps)` if data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, steps, channels)` if data_format='channels_last'.\n\n Output shape:\n 3D tensor with shape:\n `(batch_size, filters, new_steps)` if data_format='channels_first'\n or 3D tensor with shape:\n `(batch_size, new_steps, filters)` if data_format='channels_last'.\n `new_steps` value might have changed due to padding or strides.\n\n Returns:\n A tensor of rank 3 representing\n `activation(separableconv1d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n ", "arguments": [ { "name": "self", @@ -3464,7 +4528,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "SeparableConvolution1D" ] @@ -3472,7 +4536,7 @@ { "name": "SeparableConv2D", "base": "SeparableConv", - "docstring": "Depthwise separable 2D convolution.\n\n Separable convolutions consist of first performing\n a depthwise spatial convolution\n (which acts on each input channel separately)\n followed by a pointwise convolution which mixes the resulting\n output channels. 
The `depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Intuitively, separable convolutions can be understood as\n a way to factorize a convolution kernel into two smaller kernels,\n or as an extreme version of an Inception block.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't 
specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix (\n see `keras.initializers`).\n pointwise_initializer: Initializer for the pointwise kernel matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix (see `keras.regularizers`).\n pointwise_regularizer: Regularizer function applied to\n the pointwise kernel matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix (\n see `keras.constraints`).\n pointwise_constraint: Constraint function applied to\n the pointwise kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(separableconv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n ", + "docstring": "Depthwise separable 2D convolution.\n\n Separable 
convolutions consist of first performing\n a depthwise spatial convolution\n (which acts on each input channel separately)\n followed by a pointwise convolution which mixes the resulting\n output channels. The `depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Intuitively, separable convolutions can be understood as\n a way to factorize a convolution kernel into two smaller kernels,\n or as an extreme version of an Inception block.\n\n Args:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions. Current implementation only supports equal \n length strides in the row and column dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: An initializer for the depthwise convolution kernel (\n see `keras.initializers`). If None, then the default initializer (\n 'glorot_uniform') will be used.\n pointwise_initializer: An initializer for the pointwise convolution kernel (\n see `keras.initializers`). If None, then the default initializer \n ('glorot_uniform') will be used.\n bias_initializer: An initializer for the bias vector. 
If None, the default\n initializer ('zeros') will be used (see `keras.initializers`).\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix (see `keras.regularizers`).\n pointwise_regularizer: Regularizer function applied to\n the pointwise kernel matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix (\n see `keras.constraints`).\n pointwise_constraint: Constraint function applied to\n the pointwise kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(separableconv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n ", "arguments": [ { "name": "self", @@ -3574,7 +4638,7 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [ "SeparableConvolution2D" ] @@ -3582,7 +4646,7 @@ { "name": "SimpleRNN", "base": "RNN", - "docstring": "Fully-connected RNN where the output is to be fed back to input.\n\n See [the Keras RNN API 
guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the inputs.\n Default: 0.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the\n recurrent state. Default: 0.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.\n return_state: Boolean. 
Whether to return the last state\n in addition to the output. Default: `False`\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n Call arguments:\n inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[batch, timesteps]` indicating whether\n a given timestep should be masked.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or\n `recurrent_dropout` is used.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n\n Examples:\n\n ```python\n inputs = np.random.random([32, 10, 8]).astype(np.float32)\n simple_rnn = tf.keras.layers.SimpleRNN(4)\n\n output = simple_rnn(inputs) # The output has shape `[32, 4]`.\n\n simple_rnn = tf.keras.layers.SimpleRNN(\n 4, return_sequences=True, return_state=True)\n\n # whole_sequence_output has shape `[32, 10, 4]`.\n # final_state has shape `[32, 4]`.\n whole_sequence_output, final_state = simple_rnn(inputs)\n ```\n ", + "docstring": "Fully-connected RNN where the output is to be fed back to input.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is 
applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\"). Default: `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the inputs.\n Default: 0.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for the linear transformation of the\n recurrent state. Default: 0.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. Default: `False`.\n return_state: Boolean. Whether to return the last state\n in addition to the output. Default: `False`\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n Call arguments:\n inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.\n mask: Binary tensor of shape `[batch, timesteps]` indicating whether\n a given timestep should be masked. An individual `True` entry indicates\n that the corresponding timestep should be utilized, while a `False` entry\n indicates that the corresponding timestep should be ignored.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the cell\n when calling it. This is only relevant if `dropout` or\n `recurrent_dropout` is used.\n initial_state: List of initial state tensors to be passed to the first\n call of the cell.\n\n Examples:\n\n ```python\n inputs = np.random.random([32, 10, 8]).astype(np.float32)\n simple_rnn = tf.keras.layers.SimpleRNN(4)\n\n output = simple_rnn(inputs) # The output has shape `[32, 4]`.\n\n simple_rnn = tf.keras.layers.SimpleRNN(\n 4, return_sequences=True, return_state=True)\n\n # whole_sequence_output has shape `[32, 10, 4]`.\n # final_state has shape `[32, 4]`.\n whole_sequence_output, final_state = simple_rnn(inputs)\n ```\n ", "arguments": [ { "name": "self", @@ -3699,13 +4763,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent.py", + "file": "keras/layers/recurrent.py", "aliases": [] }, { "name": "SimpleRNNCell", "base": "DropoutRNNCellMixin", - "docstring": "Cell class for SimpleRNN.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole 
time sequence input, whereas\n `tf.keras.layer.SimpleRNN` processes the whole sequence.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: A 2D tensor with shape of `[batch, units]`, which is the state from\n the previous time step. 
For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n\n Examples:\n\n ```python\n inputs = np.random.random([32, 10, 8]).astype(np.float32)\n rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))\n\n output = rnn(inputs) # The output has shape `[32, 4]`.\n\n rnn = tf.keras.layers.RNN(\n tf.keras.layers.SimpleRNNCell(4),\n return_sequences=True,\n return_state=True)\n\n # whole_sequence_output has shape `[32, 10, 4]`.\n # final_state has shape `[32, 4]`.\n whole_sequence_output, final_state = rnn(inputs)\n ```\n ", + "docstring": "Cell class for SimpleRNN.\n\n See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)\n for details about the usage of RNN API.\n\n This class processes one step within the whole time sequence input, whereas\n `tf.keras.layer.SimpleRNN` processes the whole sequence.\n\n Args:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, (default `True`), whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs. Default:\n `glorot_uniform`.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix, used for the linear transformation of the recurrent state.\n Default: `orthogonal`.\n bias_initializer: Initializer for the bias vector. Default: `zeros`.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix. 
Default: `None`.\n bias_regularizer: Regularizer function applied to the bias vector. Default:\n `None`.\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix. Default: `None`.\n recurrent_constraint: Constraint function applied to the `recurrent_kernel`\n weights matrix. Default: `None`.\n bias_constraint: Constraint function applied to the bias vector. Default:\n `None`.\n dropout: Float between 0 and 1. Fraction of the units to drop for the linear\n transformation of the inputs. Default: 0.\n recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for\n the linear transformation of the recurrent state. Default: 0.\n\n Call arguments:\n inputs: A 2D tensor, with shape of `[batch, feature]`.\n states: A 2D tensor with shape of `[batch, units]`, which is the state from\n the previous time step. For timestep 0, the initial state provided by user\n will be feed to cell.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. Only relevant when `dropout` or\n `recurrent_dropout` is used.\n\n Examples:\n\n ```python\n inputs = np.random.random([32, 10, 8]).astype(np.float32)\n rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))\n\n output = rnn(inputs) # The output has shape `[32, 4]`.\n\n rnn = tf.keras.layers.RNN(\n tf.keras.layers.SimpleRNNCell(4),\n return_sequences=True,\n return_state=True)\n\n # whole_sequence_output has shape `[32, 10, 4]`.\n # final_state has shape `[32, 4]`.\n whole_sequence_output, final_state = rnn(inputs)\n ```\n ", "arguments": [ { "name": "self", @@ -3789,13 +4853,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/recurrent.py", + "file": "keras/layers/recurrent.py", "aliases": [] }, { "name": "Softmax", "base": "Layer", - "docstring": "Softmax activation function.\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Arguments:\n axis: Integer, axis along which the softmax normalization is applied.\n ", + "docstring": "Softmax activation function.\n\n Example without mask:\n\n >>> inp = np.asarray([1., 2., 1.])\n >>> layer = tf.keras.layers.Softmax()\n >>> layer(inp).numpy()\n array([0.21194157, 0.5761169 , 0.21194157], dtype=float32)\n >>> mask = np.asarray([True, False, True], dtype=bool)\n >>> layer(inp, mask).numpy()\n array([0.5, 0. , 0.5], dtype=float32)\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n axis: Integer, or list of Integers, axis along which the softmax\n normalization is applied.\n Call arguments:\n inputs: The inputs, or logits to the softmax layer.\n mask: A boolean mask of the same shape as `inputs`. Defaults to `None`. The\n mask specifies 1 to keep and 0 to mask.\n\n Returns:\n softmaxed output with the same shape as `inputs`.\n ", "arguments": [ { "name": "self", @@ -3816,15 +4880,19 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], - "file": "tensorflow/python/keras/layers/advanced_activations.py", + "file": "keras/layers/advanced_activations.py", "aliases": [] }, { "name": "SpatialDropout1D", "base": "Dropout", - "docstring": "Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 1D feature maps instead of individual elements. 
If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n\n Call arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n ", + "docstring": "Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n Args:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n\n Call arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n ", "arguments": [ { "name": "self", @@ -3851,13 +4919,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/core.py", "aliases": [] }, { "name": "SpatialDropout2D", "base": "Dropout", - "docstring": "Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 4D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n ", + "docstring": "Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n Args:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 4D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n ", "arguments": [ { "name": "self", @@ -3888,25 +4956,163 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/core.py", "aliases": [] }, { "name": "SpatialDropout3D", "base": "Dropout", - "docstring": "Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 5D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n ", + "docstring": "Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n Args:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 5D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "rate", + "default": null + }, + { + "name": "data_format", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + }, + { + "name": "training", + "default": "None" + } + ], + "file": "keras/layers/core.py", + "aliases": [] + }, + { + "name": "StackedRNNCells", + "base": "Layer", + "docstring": "Wrapper allowing a stack of RNN cells to behave as a single cell.\n\n Used to implement efficient stacked RNNs.\n\n Args:\n cells: List of RNN cell instances.\n\n Examples:\n\n ```python\n batch_size = 3\n sentence_max_length = 5\n n_features = 2\n new_shape = (batch_size, sentence_max_length, n_features)\n x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)\n\n rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]\n stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)\n lstm_layer = tf.keras.layers.RNN(stacked_lstm)\n\n result = 
lstm_layer(x)\n ```\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "cells", + "default": null + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + }, + { + "name": "states", + "default": null + }, + { + "name": "constants", + "default": "None" + }, + { + "name": "training", + "default": "None" + } + ], + "file": "keras/layers/recurrent.py", + "aliases": [] + }, + { + "name": "StringLookup", + "base": "IndexLookup", + "docstring": "Maps strings from a vocabulary to integer indices.\n\n This layer translates a set of arbitrary strings into an integer output via a\n table-based vocabulary lookup.\n\n The vocabulary for the layer can be supplied on construction or learned via\n `adapt()`. During `adapt()`, the layer will analyze a data set, determine the\n frequency of individual strings tokens, and create a vocabulary from them. If\n the vocabulary is capped in size, the most frequent tokens will be used to\n create the vocabulary and all others will be treated as out-of-vocabulary\n (OOV).\n\n There are two possible output modes for the layer.\n When `output_mode` is `\"int\"`,\n input strings are converted to their index in the vocabulary (an integer).\n When `output_mode` is `\"multi_hot\"`, `\"count\"`, or `\"tf_idf\"`, input strings\n are encoded into an array where each dimension corresponds to an element in\n the vocabulary.\n\n The vocabulary can optionally contain a mask token as well as an OOV token\n (which can optionally occupy multiple indices in the vocabulary, as set\n by `num_oov_indices`).\n The position of these tokens in the vocabulary is fixed. When `output_mode` is\n `\"int\"`, the vocabulary will begin with the mask token (if set), followed by\n OOV indices, followed by the rest of the vocabulary. 
When `output_mode` is\n `\"multi_hot\"`, `\"count\"`, or `\"tf_idf\"` the vocabulary will begin with OOV\n indices and instances of the mask token will be dropped.\n\n Args:\n max_tokens: The maximum size of the vocabulary for this layer. If None,\n there is no cap on the size of the vocabulary. Note that this size\n includes the OOV and mask tokens. Default to None.\n num_oov_indices: The number of out-of-vocabulary tokens to use. If this\n value is more than 1, OOV inputs are hashed to determine their OOV value.\n If this value is 0, OOV inputs will cause an error when calling the layer.\n Defaults to 1.\n mask_token: A token that represents masked inputs. When `output_mode` is\n `\"int\"`, the token is included in vocabulary and mapped to index 0. In\n other output modes, the token will not appear in the vocabulary and\n instances of the mask token in the input will be dropped. If set to None,\n no mask term will be added. Defaults to `None`.\n oov_token: Only used when `invert` is True. The token to return for OOV\n indices. Defaults to `\"[UNK]\"`.\n vocabulary: Optional. Either an array of strings or a string path to a text\n file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D\n tensor containing the string vocbulary terms. If passing a file path, the\n file should contain one line per term in the vocabulary. If this argument\n is set, there is no need to `adapt` the layer.\n invert: Only valid when `output_mode` is `\"int\"`. If True, this layer will\n map indices to vocabulary items instead of mapping vocabulary items to\n indices. Default to False.\n output_mode: Specification for the output of the layer. 
Defaults to `\"int\"`.\n Values can be `\"int\"`, `\"one_hot\"`, `\"multi_hot\"`, `\"count\"`, or\n `\"tf_idf\"` configuring the layer as follows:\n - `\"int\"`: Return the raw integer indices of the input tokens.\n - `\"one_hot\"`: Encodes each individual element in the input into an\n array the same size as the vocabulary, containing a 1 at the element\n index. If the last dimension is size 1, will encode on that dimension.\n If the last dimension is not size 1, will append a new dimension for\n the encoded output.\n - `\"multi_hot\"`: Encodes each sample in the input into a single array\n the same size as the vocabulary, containing a 1 for each vocabulary\n term present in the sample. Treats the last dimension as the sample\n dimension, if input shape is (..., sample_length), output shape will\n be (..., num_tokens).\n - `\"count\"`: As `\"multi_hot\"`, but the int array contains a count of the\n number of times the token at that index appeared in the sample.\n - `\"tf_idf\"`: As `\"multi_hot\"`, but the TF-IDF algorithm is applied to\n find the value in each token slot.\n For `\"int\"` output, any shape of input and output is supported. For all\n other output modes, currently only output up to rank 2 is supported.\n pad_to_max_tokens: Only applicable when `output_mode` is `\"multi_hot\"`,\n `\"count\"`, or `\"tf_idf\"`. If True, the output will have its feature axis\n padded to `max_tokens` even if the number of unique tokens in the\n vocabulary is less than max_tokens, resulting in a tensor of shape\n [batch_size, max_tokens] regardless of vocabulary size. Defaults to False.\n sparse: Boolean. Only applicable when `output_mode` is `\"multi_hot\"`,\n `\"count\"`, or `\"tf_idf\"`. If True, returns a `SparseTensor` instead of a\n dense `Tensor`. 
Defaults to False.\n\n Examples:\n\n **Creating a lookup layer with a known vocabulary**\n\n This example creates a lookup layer with a pre-existing vocabulary.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([[\"a\", \"c\", \"d\"], [\"d\", \"z\", \"b\"]])\n >>> layer = tf.keras.layers.StringLookup(vocabulary=vocab)\n >>> layer(data)\n \n\n **Creating a lookup layer with an adapted vocabulary**\n\n This example creates a lookup layer and generates the vocabulary by analyzing\n the dataset.\n\n >>> data = tf.constant([[\"a\", \"c\", \"d\"], [\"d\", \"z\", \"b\"]])\n >>> layer = tf.keras.layers.StringLookup()\n >>> layer.adapt(data)\n >>> layer.get_vocabulary()\n ['[UNK]', 'd', 'z', 'c', 'b', 'a']\n\n Note that the OOV token `\"[UNK]\"` has been added to the vocabulary.\n The remaining tokens are sorted by frequency\n (`\"d\"`, which has 2 occurrences, is first) then by inverse sort order.\n\n >>> data = tf.constant([[\"a\", \"c\", \"d\"], [\"d\", \"z\", \"b\"]])\n >>> layer = tf.keras.layers.StringLookup()\n >>> layer.adapt(data)\n >>> layer(data)\n \n\n **Lookups with multiple OOV indices**\n\n This example demonstrates how to use a lookup layer with multiple OOV indices.\n When a layer is created with more than one OOV index, any OOV values are\n hashed into the number of OOV buckets, distributing OOV values in a\n deterministic fashion across the set.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([[\"a\", \"c\", \"d\"], [\"m\", \"z\", \"b\"]])\n >>> layer = tf.keras.layers.StringLookup(vocabulary=vocab, num_oov_indices=2)\n >>> layer(data)\n \n\n Note that the output for OOV value 'm' is 0, while the output for OOV value\n 'z' is 1. The in-vocab terms have their output index increased by 1 from\n earlier examples (a maps to 2, etc) in order to make space for the extra OOV\n value.\n\n **One-hot output**\n\n Configure the layer with `output_mode='one_hot'`. 
Note that the first\n `num_oov_indices` dimensions in the ont_hot encoding represent OOV values.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([\"a\", \"b\", \"c\", \"d\", \"z\"])\n >>> layer = tf.keras.layers.StringLookup(\n ... vocabulary=vocab, output_mode='one_hot')\n >>> layer(data)\n \n\n **Multi-hot output**\n\n Configure the layer with `output_mode='multi_hot'`. Note that the first\n `num_oov_indices` dimensions in the multi_hot encoding represent OOV values.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([[\"a\", \"c\", \"d\", \"d\"], [\"d\", \"z\", \"b\", \"z\"]])\n >>> layer = tf.keras.layers.StringLookup(\n ... vocabulary=vocab, output_mode='multi_hot')\n >>> layer(data)\n \n\n **Token count output**\n\n Configure the layer with `output_mode='count'`. As with multi_hot output, the\n first `num_oov_indices` dimensions in the output represent OOV values.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([[\"a\", \"c\", \"d\", \"d\"], [\"d\", \"z\", \"b\", \"z\"]])\n >>> layer = tf.keras.layers.StringLookup(\n ... vocabulary=vocab, output_mode='count')\n >>> layer(data)\n \n\n **TF-IDF output**\n\n Configure the layer with `output_mode=\"tf_idf\"`. As with multi_hot output, the\n first `num_oov_indices` dimensions in the output represent OOV values.\n\n Each token bin will output `token_count * idf_weight`, where the idf weights\n are the inverse document frequency weights per token. These should be provided\n along with the vocabulary. 
Note that the `idf_weight` for OOV values will\n default to the average of all idf weights passed in.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> idf_weights = [0.25, 0.75, 0.6, 0.4]\n >>> data = tf.constant([[\"a\", \"c\", \"d\", \"d\"], [\"d\", \"z\", \"b\", \"z\"]])\n >>> layer = tf.keras.layers.StringLookup(output_mode=\"tf_idf\")\n >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)\n >>> layer(data)\n \n\n To specify the idf weights for oov values, you will need to pass the entire\n vocabularly including the leading oov token.\n\n >>> vocab = [\"[UNK]\", \"a\", \"b\", \"c\", \"d\"]\n >>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]\n >>> data = tf.constant([[\"a\", \"c\", \"d\", \"d\"], [\"d\", \"z\", \"b\", \"z\"]])\n >>> layer = tf.keras.layers.StringLookup(output_mode=\"tf_idf\")\n >>> layer.set_vocabulary(vocab, idf_weights=idf_weights)\n >>> layer(data)\n \n\n When adapting the layer in `\"tf_idf\"` mode, each input sample will be\n considered a document, and IDF weight per token will be calculated as\n `log(1 + num_documents / (1 + token_document_count))`.\n\n **Inverse lookup**\n\n This example demonstrates how to map indices to strings using this layer. 
(You\n can also use `adapt()` with `inverse=True`, but for simplicity we'll pass the\n vocab in this example.)\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([[1, 3, 4], [4, 0, 2]])\n >>> layer = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)\n >>> layer(data)\n \n\n Note that the first index correspond to the oov token by default.\n\n\n **Forward and inverse lookup pairs**\n\n This example demonstrates how to use the vocabulary of a standard lookup\n layer to create an inverse lookup layer.\n\n >>> vocab = [\"a\", \"b\", \"c\", \"d\"]\n >>> data = tf.constant([[\"a\", \"c\", \"d\"], [\"d\", \"z\", \"b\"]])\n >>> layer = tf.keras.layers.StringLookup(vocabulary=vocab)\n >>> i_layer = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)\n >>> int_data = layer(data)\n >>> i_layer(int_data)\n \n\n In this example, the input value `\"z\"` resulted in an output of `\"[UNK]\"`,\n since 1000 was not in the vocabulary - it got represented as an OOV, and all\n OOV values are returned as `\"[UNK]\"` in the inverse layer. 
Also, note that\n for the inverse to work, you must have already set the forward layer\n vocabulary either directly or via `adapt()` before calling `get_vocabulary()`.\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "rate", + "name": "max_tokens", + "default": "None" + }, + { + "name": "num_oov_indices", + "default": 1 + }, + { + "name": "mask_token", + "default": "None" + }, + { + "name": "oov_token", + "default": "[UNK]" + }, + { + "name": "vocabulary", + "default": "None" + }, + { + "name": "encoding", + "default": "None" + }, + { + "name": "invert", + "default": "False", + "type": "boolean" + }, + { + "name": "output_mode", + "default": "int" + }, + { + "name": "sparse", + "default": "False", + "type": "boolean" + }, + { + "name": "pad_to_max_tokens", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "keras/layers/preprocessing/string_lookup.py", + "aliases": [] + }, + { + "name": "Subtract", + "base": "_Merge", + "docstring": "Layer that subtracts two inputs.\n\n It takes as input a list of tensors of size 2,\n both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),\n also of the same shape.\n\n Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n # Equivalent to subtracted = keras.layers.subtract([x1, x2])\n subtracted = keras.layers.Subtract()([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n ", + "arguments": [ + { + "name": "self", "default": null - }, - { - "name": "data_format", - "default": "None" } ], "abstract": false, @@ -3919,64 +5125,52 @@ { "name": "inputs", "default": null - }, - { - 
"name": "training", - "default": "None" } ], - "file": "tensorflow/python/keras/layers/core.py", + "file": "keras/layers/merge.py", "aliases": [] }, { - "name": "StackedRNNCells", - "base": "Layer", - "docstring": "Wrapper allowing a stack of RNN cells to behave as a single cell.\n\n Used to implement efficient stacked RNNs.\n\n Arguments:\n cells: List of RNN cell instances.\n\n Examples:\n\n ```python\n batch_size = 3\n sentence_max_length = 5\n n_features = 2\n new_shape = (batch_size, sentence_max_length, n_features)\n x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)\n\n rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]\n stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)\n lstm_layer = tf.keras.layers.RNN(stacked_lstm)\n\n result = lstm_layer(x)\n ```\n ", + "name": "TextVectorization", + "base": "PreprocessingLayer", + "docstring": "Text vectorization layer.\n\n This layer has basic options for managing text in a Keras model. It\n transforms a batch of strings (one example = one string) into either a list of\n token indices (one example = 1D tensor of integer token indices) or a dense\n representation (one example = 1D tensor of float values representing data\n about the example's tokens).\n\n If desired, the user can call this layer's `adapt()` method on a dataset.\n When this layer is adapted, it will analyze the dataset, determine the\n frequency of individual string values, and create a 'vocabulary' from them.\n This vocabulary can have unlimited size or be capped, depending on the\n configuration options for this layer; if there are more unique values in the\n input than the maximum vocabulary size, the most frequent terms will be used\n to create the vocabulary.\n\n The processing of each example contains the following steps:\n\n 1. Standardize each example (usually lowercasing + punctuation stripping)\n 2. Split each example into substrings (usually words)\n 3. 
Recombine substrings into tokens (usually ngrams)\n 4. Index tokens (associate a unique int value with each token)\n 5. Transform each example using this index, either into a vector of ints or\n a dense float vector.\n\n Some notes on passing callables to customize splitting and normalization for\n this layer:\n\n 1. Any callable can be passed to this Layer, but if you want to serialize\n this object you should only pass functions that are registered Keras\n serializables (see `tf.keras.utils.register_keras_serializable` for more\n details).\n 2. When using a custom callable for `standardize`, the data received\n by the callable will be exactly as passed to this layer. The callable\n should return a tensor of the same shape as the input.\n 3. When using a custom callable for `split`, the data received by the\n callable will have the 1st dimension squeezed out - instead of\n `[[\"string to split\"], [\"another string to split\"]]`, the Callable will\n see `[\"string to split\", \"another string to split\"]`. The callable should\n return a Tensor with the first dimension containing the split tokens -\n in this example, we should see something like `[[\"string\", \"to\",\n \"split\"], [\"another\", \"string\", \"to\", \"split\"]]`. This makes the callable\n site natively compatible with `tf.strings.split()`.\n\n Args:\n max_tokens: The maximum size of the vocabulary for this layer. If None,\n there is no cap on the size of the vocabulary. Note that this vocabulary\n contains 1 OOV token, so the effective number of tokens is `(max_tokens -\n 1 - (1 if output_mode == \"int\" else 0))`.\n standardize: Optional specification for standardization to apply to the\n input text. Values can be None (no standardization),\n `\"lower_and_strip_punctuation\"` (lowercase and remove punctuation) or a\n Callable. Default is `\"lower_and_strip_punctuation\"`.\n split: Optional specification for splitting the input text. 
Values can be\n None (no splitting), `\"whitespace\"` (split on ASCII whitespace), or a\n Callable. The default is `\"whitespace\"`.\n ngrams: Optional specification for ngrams to create from the possibly-split\n input text. Values can be None, an integer or tuple of integers; passing\n an integer will create ngrams up to that integer, and passing a tuple of\n integers will create ngrams for the specified values in the tuple. Passing\n None means that no ngrams will be created.\n output_mode: Optional specification for the output of the layer. Values can\n be `\"int\"`, `\"multi_hot\"`, `\"count\"` or `\"tf_idf\"`, configuring the layer\n as follows:\n - `\"int\"`: Outputs integer indices, one integer index per split string\n token. When `output_mode == \"int\"`, 0 is reserved for masked\n locations; this reduces the vocab size to\n `max_tokens - 2` instead of `max_tokens - 1`.\n - `\"multi_hot\"`: Outputs a single int array per batch, of either\n vocab_size or max_tokens size, containing 1s in all elements where the\n token mapped to that index exists at least once in the batch item.\n - `\"count\"`: Like `\"multi_hot\"`, but the int array contains a count of\n the number of times the token at that index appeared in the\n batch item.\n - `\"tf_idf\"`: Like `\"multi_hot\"`, but the TF-IDF algorithm is applied to\n find the value in each token slot.\n For `\"int\"` output, any shape of input and output is supported. For all\n other output modes, currently only rank 1 inputs (and rank 2 outputs after\n splitting) are supported.\n output_sequence_length: Only valid in INT mode. If set, the output will have\n its time dimension padded or truncated to exactly `output_sequence_length`\n values, resulting in a tensor of shape\n `(batch_size, output_sequence_length)` regardless of how many tokens\n resulted from the splitting step. Defaults to None.\n pad_to_max_tokens: Only valid in `\"multi_hot\"`, `\"count\"`, and `\"tf_idf\"`\n modes. 
If True, the output will have its feature axis padded to\n `max_tokens` even if the number of unique tokens in the vocabulary is less\n than max_tokens, resulting in a tensor of shape `(batch_size, max_tokens)`\n regardless of vocabulary size. Defaults to False.\n vocabulary: Optional. Either an array of strings or a string path to a text\n file. If passing an array, can pass a tuple, list, 1D numpy array, or 1D\n tensor containing the string vocbulary terms. If passing a file path, the\n file should contain one line per term in the vocabulary. If this argument\n is set, there is no need to `adapt` the layer.\n\n Example:\n\n This example instantiates a `TextVectorization` layer that lowercases text,\n splits on whitespace, strips punctuation, and outputs integer vocab indices.\n\n >>> text_dataset = tf.data.Dataset.from_tensor_slices([\"foo\", \"bar\", \"baz\"])\n >>> max_features = 5000 # Maximum vocab size.\n >>> max_len = 4 # Sequence length to pad the outputs to.\n >>>\n >>> # Create the layer.\n >>> vectorize_layer = tf.keras.layers.TextVectorization(\n ... max_tokens=max_features,\n ... output_mode='int',\n ... output_sequence_length=max_len)\n >>>\n >>> # Now that the vocab layer has been created, call `adapt` on the text-only\n >>> # dataset to create the vocabulary. You don't have to batch, but for large\n >>> # datasets this means we're not keeping spare copies of the dataset.\n >>> vectorize_layer.adapt(text_dataset.batch(64))\n >>>\n >>> # Create the model that uses the vectorize text layer\n >>> model = tf.keras.models.Sequential()\n >>>\n >>> # Start by creating an explicit input layer. It needs to have a shape of\n >>> # (1,) (because we need to guarantee that there is exactly one string\n >>> # input per batch), and the dtype needs to be 'string'.\n >>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))\n >>>\n >>> # The first layer in our model is the vectorization layer. 
After this\n >>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab\n >>> # indices.\n >>> model.add(vectorize_layer)\n >>>\n >>> # Now, the model can map strings to integers, and you can add an embedding\n >>> # layer to map these integers to learned embeddings.\n >>> input_data = [[\"foo qux bar\"], [\"qux baz\"]]\n >>> model.predict(input_data)\n array([[2, 1, 4, 0],\n [1, 3, 0, 0]])\n\n Example:\n\n This example instantiates a `TextVectorization` layer by passing a list\n of vocabulary terms to the layer's `__init__()` method.\n\n >>> vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n >>> max_len = 4 # Sequence length to pad the outputs to.\n >>>\n >>> # Create the layer, passing the vocab directly. You can also pass the\n >>> # vocabulary arg a path to a file containing one vocabulary word per\n >>> # line.\n >>> vectorize_layer = tf.keras.layers.TextVectorization(\n ... max_tokens=max_features,\n ... output_mode='int',\n ... output_sequence_length=max_len,\n ... vocabulary=vocab_data)\n >>>\n >>> # Because we've passed the vocabulary directly, we don't need to adapt\n >>> # the layer - the vocabulary is already set. 
The vocabulary contains the\n >>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.\n >>> vectorize_layer.get_vocabulary()\n ['', '[UNK]', 'earth', 'wind', 'and', 'fire']\n\n ", "arguments": [ { "name": "self", "default": null }, { - "name": "cells", - "default": null - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null + "name": "max_tokens", + "default": "None" }, { - "name": "inputs", - "default": null + "name": "standardize", + "default": "lower_and_strip_punctuation" }, { - "name": "states", - "default": null + "name": "split", + "default": "whitespace" }, { - "name": "constants", + "name": "ngrams", "default": "None" }, { - "name": "training", + "name": "output_mode", + "default": "int" + }, + { + "name": "output_sequence_length", "default": "None" - } - ], - "file": "tensorflow/python/keras/layers/recurrent.py", - "aliases": [] - }, - { - "name": "Subtract", - "base": "_Merge", - "docstring": "Layer that subtracts two inputs.\n\n It takes as input a list of tensors of size 2,\n both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),\n also of the same shape.\n\n Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n # Equivalent to subtracted = keras.layers.subtract([x1, x2])\n subtracted = keras.layers.Subtract()([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n ", - "arguments": [ + }, { - "name": "self", - "default": null + "name": "pad_to_max_tokens", + "default": "False", + "type": "boolean" + }, + { + "name": "vocabulary", + "default": "None" } ], "abstract": false, @@ -3991,13 +5185,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/merge.py", + "file": 
"keras/layers/preprocessing/text_vectorization.py", "aliases": [] }, { "name": "ThresholdedReLU", "base": "Layer", - "docstring": "Thresholded Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = x for x > theta\n f(x) = 0 otherwise`\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Arguments:\n theta: Float >= 0. Threshold location of activation.\n ", + "docstring": "Thresholded Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = x for x > theta\n f(x) = 0 otherwise`\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n theta: Float >= 0. Threshold location of activation.\n ", "arguments": [ { "name": "self", @@ -4020,13 +5214,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/advanced_activations.py", + "file": "keras/layers/advanced_activations.py", "aliases": [] }, { "name": "TimeDistributed", "base": "Wrapper", - "docstring": "This wrapper allows to apply a layer to every temporal slice of an input.\n\n The input should be at least 3D, and the dimension of index one\n will be considered to be the temporal dimension.\n\n Consider a batch of 32 video samples, where each sample is a 128x128 RGB image\n with `channels_last` data format, across 10 timesteps.\n The batch input shape is `(32, 10, 128, 128, 3)`.\n\n You can then use `TimeDistributed` to apply a `Conv2D` layer to each of the\n 10 timesteps, independently:\n\n >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3))\n >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3))\n >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs)\n >>> outputs.shape\n TensorShape([None, 10, 126, 126, 64])\n\n 
Arguments:\n layer: a `tf.keras.layers.Layer` instance.\n\n Call arguments:\n inputs: Input tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. This argument is passed to the\n wrapped layer (only if the layer supports this argument).\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked. This argument is passed to the\n wrapped layer (only if the layer supports this argument).\n\n Raises:\n ValueError: If not initialized with a `tf.keras.layers.Layer` instance.\n ", + "docstring": "This wrapper allows to apply a layer to every temporal slice of an input.\n\n Every input should be at least 3D, and the dimension of index one of the\n first input will be considered to be the temporal dimension.\n\n Consider a batch of 32 video samples, where each sample is a 128x128 RGB image\n with `channels_last` data format, across 10 timesteps.\n The batch input shape is `(32, 10, 128, 128, 3)`.\n\n You can then use `TimeDistributed` to apply the same `Conv2D` layer to each\n of the 10 timesteps, independently:\n\n >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3))\n >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3))\n >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs)\n >>> outputs.shape\n TensorShape([None, 10, 126, 126, 64])\n\n Because `TimeDistributed` applies the same instance of `Conv2D` to each of the\n timestamps, the same set of weights are used at each timestamp.\n\n Args:\n layer: a `tf.keras.layers.Layer` instance.\n\n Call arguments:\n inputs: Input tensor of shape (batch, time, ...) or nested tensors,\n and each of which has shape (batch, time, ...).\n training: Python boolean indicating whether the layer should behave in\n training mode or in inference mode. 
This argument is passed to the\n wrapped layer (only if the layer supports this argument).\n mask: Binary tensor of shape `(samples, timesteps)` indicating whether\n a given timestep should be masked. This argument is passed to the\n wrapped layer (only if the layer supports this argument).\n\n Raises:\n ValueError: If not initialized with a `tf.keras.layers.Layer` instance.\n ", "arguments": [ { "name": "self", @@ -4057,13 +5251,13 @@ "default": "None" } ], - "file": "tensorflow/python/keras/layers/wrappers.py", + "file": "keras/layers/wrappers.py", "aliases": [] }, { "name": "UpSampling1D", "base": "Layer", - "docstring": "Upsampling layer for 1D inputs.\n\n Repeats each temporal step `size` times along the time axis.\n\n Examples:\n\n >>> input_shape = (2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n >>> y = tf.keras.layers.UpSampling1D(size=2)(x)\n >>> print(y)\n tf.Tensor(\n [[[ 0 1 2]\n [ 0 1 2]\n [ 3 4 5]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 6 7 8]\n [ 9 10 11]\n [ 9 10 11]]], shape=(2, 4, 3), dtype=int64)\n\n Arguments:\n size: Integer. Upsampling factor.\n\n Input shape:\n 3D tensor with shape: `(batch_size, steps, features)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, upsampled_steps, features)`.\n ", + "docstring": "Upsampling layer for 1D inputs.\n\n Repeats each temporal step `size` times along the time axis.\n\n Examples:\n\n >>> input_shape = (2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n >>> y = tf.keras.layers.UpSampling1D(size=2)(x)\n >>> print(y)\n tf.Tensor(\n [[[ 0 1 2]\n [ 0 1 2]\n [ 3 4 5]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 6 7 8]\n [ 9 10 11]\n [ 9 10 11]]], shape=(2, 4, 3), dtype=int64)\n\n Args:\n size: Integer. 
Upsampling factor.\n\n Input shape:\n 3D tensor with shape: `(batch_size, steps, features)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, upsampled_steps, features)`.\n ", "arguments": [ { "name": "self", @@ -4086,13 +5280,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { "name": "UpSampling2D", "base": "Layer", - "docstring": "Upsampling layer for 2D inputs.\n\n Repeats the rows and columns of the data\n by `size[0]` and `size[1]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 2, 1, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n >>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)\n >>> print(y)\n tf.Tensor(\n [[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)\n\n Arguments:\n size: Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n interpolation: A string, one of `nearest` or `bilinear`.\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n - If `data_format` is 
`\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`\n ", + "docstring": "Upsampling layer for 2D inputs.\n\n Repeats the rows and columns of the data\n by `size[0]` and `size[1]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 2, 1, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n >>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)\n >>> print(y)\n tf.Tensor(\n [[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)\n\n Args:\n size: Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n interpolation: A string, one of `nearest` or `bilinear`.\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`\n ", "arguments": [ { "name": "self", @@ -4126,13 +5320,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { "name": "UpSampling3D", "base": "Layer", - "docstring": 
"Upsampling layer for 3D inputs.\n\n Repeats the 1st, 2nd and 3rd dimensions\n of the data by `size[0]`, `size[1]` and `size[2]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 1, 2, 1, 3)\n >>> x = tf.constant(1, shape=input_shape)\n >>> y = tf.keras.layers.UpSampling3D(size=2)(x)\n >>> print(y.shape)\n (2, 2, 4, 2, 3)\n\n Arguments:\n size: Int, or tuple of 3 integers.\n The upsampling factors for dim1, dim2 and dim3.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, dim1, dim2, dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, dim1, dim2, dim3)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`\n ", + "docstring": "Upsampling layer for 3D inputs.\n\n Repeats the 1st, 2nd and 3rd dimensions\n of the data by `size[0]`, `size[1]` and `size[2]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 1, 2, 1, 3)\n >>> x = tf.constant(1, shape=input_shape)\n >>> y = tf.keras.layers.UpSampling3D(size=2)(x)\n >>> print(y.shape)\n (2, 2, 4, 2, 3)\n\n Args:\n size: Int, or tuple of 3 integers.\n The upsampling factors for dim1, dim2 and dim3.\n data_format: A string,\n one of `channels_last` (default) or 
`channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, dim1, dim2, dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, dim1, dim2, dim3)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`\n ", "arguments": [ { "name": "self", @@ -4163,13 +5357,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { "name": "ZeroPadding1D", "base": "Layer", - "docstring": "Zero-padding layer for 1D input (e.g. 
temporal sequence).\n\n Examples:\n\n >>> input_shape = (2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n >>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)\n >>> print(y)\n tf.Tensor(\n [[[ 0 0 0]\n [ 0 0 0]\n [ 0 1 2]\n [ 3 4 5]\n [ 0 0 0]\n [ 0 0 0]]\n [[ 0 0 0]\n [ 0 0 0]\n [ 6 7 8]\n [ 9 10 11]\n [ 0 0 0]\n [ 0 0 0]]], shape=(2, 6, 3), dtype=int64)\n\n Arguments:\n padding: Int, or tuple of int (length 2), or dictionary.\n - If int:\n How many zeros to add at the beginning and end of\n the padding dimension (axis 1).\n - If tuple of int (length 2):\n How many zeros to add at the beginning and the end of\n the padding dimension (`(left_pad, right_pad)`).\n\n Input shape:\n 3D tensor with shape `(batch_size, axis_to_pad, features)`\n\n Output shape:\n 3D tensor with shape `(batch_size, padded_axis, features)`\n ", + "docstring": "Zero-padding layer for 1D input (e.g. temporal sequence).\n\n Examples:\n\n >>> input_shape = (2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n >>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)\n >>> print(y)\n tf.Tensor(\n [[[ 0 0 0]\n [ 0 0 0]\n [ 0 1 2]\n [ 3 4 5]\n [ 0 0 0]\n [ 0 0 0]]\n [[ 0 0 0]\n [ 0 0 0]\n [ 6 7 8]\n [ 9 10 11]\n [ 0 0 0]\n [ 0 0 0]]], shape=(2, 6, 3), dtype=int64)\n\n Args:\n padding: Int, or tuple of int (length 2), or dictionary.\n - If int:\n How many zeros to add at the beginning and end of\n the padding dimension (axis 1).\n - If tuple of int (length 2):\n How many zeros to add at the beginning and the end of\n the padding dimension (`(left_pad, right_pad)`).\n\n Input shape:\n 3D tensor with shape `(batch_size, axis_to_pad, features)`\n\n Output shape:\n 3D tensor with shape `(batch_size, padded_axis, features)`\n ", "arguments": [ { "name": "self", @@ -4192,13 +5386,13 @@ "default": null } ], - "file": 
"tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { "name": "ZeroPadding2D", "base": "Layer", - "docstring": "Zero-padding layer for 2D input (e.g. picture).\n\n This layer can add rows and columns of zeros\n at the top, bottom, left and right side of an image tensor.\n\n Examples:\n\n >>> input_shape = (1, 1, 2, 2)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[0 1]\n [2 3]]]]\n >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x)\n >>> print(y)\n tf.Tensor(\n [[[[0 0]\n [0 0]\n [0 0]\n [0 0]]\n [[0 0]\n [0 1]\n [2 3]\n [0 0]]\n [[0 0]\n [0 0]\n [0 0]\n [0 0]]]], shape=(1, 3, 4, 2), dtype=int64)\n\n Arguments:\n padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric padding values for height and width:\n `(symmetric_height_pad, symmetric_width_pad)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_pad, bottom_pad), (left_pad, right_pad))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, padded_rows, padded_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n 
`(batch_size, channels, padded_rows, padded_cols)`\n ", + "docstring": "Zero-padding layer for 2D input (e.g. picture).\n\n This layer can add rows and columns of zeros\n at the top, bottom, left and right side of an image tensor.\n\n Examples:\n\n >>> input_shape = (1, 1, 2, 2)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[0 1]\n [2 3]]]]\n >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x)\n >>> print(y)\n tf.Tensor(\n [[[[0 0]\n [0 0]\n [0 0]\n [0 0]]\n [[0 0]\n [0 1]\n [2 3]\n [0 0]]\n [[0 0]\n [0 0]\n [0 0]\n [0 0]]]], shape=(1, 3, 4, 2), dtype=int64)\n\n Args:\n padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric padding values for height and width:\n `(symmetric_height_pad, symmetric_width_pad)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_pad, bottom_pad), (left_pad, right_pad))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, padded_rows, padded_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, padded_rows, padded_cols)`\n ", "arguments": [ { "name": "self", @@ -4228,13 
+5422,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { "name": "ZeroPadding3D", "base": "Layer", - "docstring": "Zero-padding layer for 3D data (spatial or spatio-temporal).\n\n Examples:\n\n >>> input_shape = (1, 1, 2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x)\n >>> print(y.shape)\n (1, 5, 6, 6, 3)\n\n Arguments:\n padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 3 ints:\n interpreted as two different\n symmetric padding values for height and width:\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\n - If tuple of 3 tuples of 2 ints:\n interpreted as\n `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,\n right_dim2_pad), (left_dim3_pad, right_dim3_pad))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_padded_axis, second_padded_axis, third_axis_to_pad,\n depth)`\n - If 
`data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_padded_axis, second_padded_axis,\n third_axis_to_pad)`\n ", + "docstring": "Zero-padding layer for 3D data (spatial or spatio-temporal).\n\n Examples:\n\n >>> input_shape = (1, 1, 2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x)\n >>> print(y.shape)\n (1, 5, 6, 6, 3)\n\n Args:\n padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 3 ints:\n interpreted as two different\n symmetric padding values for height and width:\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\n - If tuple of 3 tuples of 2 ints:\n interpreted as\n `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,\n right_dim2_pad), (left_dim3_pad, right_dim3_pad))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_padded_axis, second_padded_axis, third_axis_to_pad,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_padded_axis, 
second_padded_axis,\n third_axis_to_pad)`\n ", "arguments": [ { "name": "self", @@ -4265,231 +5459,13 @@ "default": null } ], - "file": "tensorflow/python/keras/layers/convolutional.py", - "aliases": [] - }, - { - "name": "CuDNNGRU", - "base": "_CuDNNRNN", - "docstring": "Fast GRU implementation backed by cuDNN.\n\n More information about cuDNN can be found on the [NVIDIA\n developer website](https://developer.nvidia.com/cudnn).\n Can only be run on GPU.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\").\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n recurrent_constraint: Constraint function applied to the\n `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output in the output\n sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state in addition to the\n output.\n go_backwards: Boolean (default False). If True, process the input sequence\n backwards and return the reversed sequence.\n stateful: Boolean (default False). 
If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "units", - "default": null - }, - { - "name": "kernel_initializer", - "default": "glorot_uniform" - }, - { - "name": "recurrent_initializer", - "default": "orthogonal" - }, - { - "name": "bias_initializer", - "default": "zeros" - }, - { - "name": "kernel_regularizer", - "default": "None" - }, - { - "name": "recurrent_regularizer", - "default": "None" - }, - { - "name": "bias_regularizer", - "default": "None" - }, - { - "name": "activity_regularizer", - "default": "None" - }, - { - "name": "kernel_constraint", - "default": "None" - }, - { - "name": "recurrent_constraint", - "default": "None" - }, - { - "name": "bias_constraint", - "default": "None" - }, - { - "name": "return_sequences", - "default": "False", - "type": "boolean" - }, - { - "name": "return_state", - "default": "False", - "type": "boolean" - }, - { - "name": "go_backwards", - "default": "False", - "type": "boolean" - }, - { - "name": "stateful", - "default": "False", - "type": "boolean" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - }, - { - "name": "mask", - "default": "None" - }, - { - "name": "training", - "default": "None" - }, - { - "name": "initial_state", - "default": "None" - } - ], - "file": "tensorflow/python/keras/layers/cudnn_recurrent.py", - "aliases": [] - }, - { - "name": "CuDNNLSTM", - "base": "_CuDNNRNN", - "docstring": "Fast LSTM implementation backed by cuDNN.\n\n More information about cuDNN can be found on the [NVIDIA\n developer website](https://developer.nvidia.com/cudnn).\n Can only be run on GPU.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n kernel_initializer: Initializer for the `kernel` weights matrix, used for\n the 
linear transformation of the inputs.\n unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate\n at initialization. Setting it to true will also force\n `bias_initializer=\"zeros\"`. This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n recurrent_initializer: Initializer for the `recurrent_kernel` weights\n matrix, used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n recurrent_regularizer: Regularizer function applied to the\n `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\").\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n recurrent_constraint: Constraint function applied to the\n `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output. in the\n output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state in addition to the\n output.\n go_backwards: Boolean (default False). If True, process the input sequence\n backwards and return the reversed sequence.\n stateful: Boolean (default False). 
If True, the last state for each sample\n at index i in a batch will be used as initial state for the sample of\n index i in the following batch.\n ", - "arguments": [ - { - "name": "self", - "default": null - }, - { - "name": "units", - "default": null - }, - { - "name": "kernel_initializer", - "default": "glorot_uniform" - }, - { - "name": "recurrent_initializer", - "default": "orthogonal" - }, - { - "name": "bias_initializer", - "default": "zeros" - }, - { - "name": "unit_forget_bias", - "default": "True", - "type": "boolean" - }, - { - "name": "kernel_regularizer", - "default": "None" - }, - { - "name": "recurrent_regularizer", - "default": "None" - }, - { - "name": "bias_regularizer", - "default": "None" - }, - { - "name": "activity_regularizer", - "default": "None" - }, - { - "name": "kernel_constraint", - "default": "None" - }, - { - "name": "recurrent_constraint", - "default": "None" - }, - { - "name": "bias_constraint", - "default": "None" - }, - { - "name": "return_sequences", - "default": "False", - "type": "boolean" - }, - { - "name": "return_state", - "default": "False", - "type": "boolean" - }, - { - "name": "go_backwards", - "default": "False", - "type": "boolean" - }, - { - "name": "stateful", - "default": "False", - "type": "boolean" - } - ], - "abstract": false, - "outputs": [], - "inputs": [ - { - "name": "self", - "default": null - }, - { - "name": "inputs", - "default": null - }, - { - "name": "mask", - "default": "None" - }, - { - "name": "training", - "default": "None" - }, - { - "name": "initial_state", - "default": "None" - } - ], - "file": "tensorflow/python/keras/layers/cudnn_recurrent.py", - "aliases": [] - }, - { - "name": "_CuDNNRNN", - "base": "RNN", - "docstring": "Private base class for CuDNNGRU and CuDNNLSTM layers.\n\n Arguments:\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n return_state: Boolean. 
Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n time_major: Boolean (default False). If true, the inputs and outputs will be\n in shape `(timesteps, batch, ...)`, whereas in the False case, it will\n be `(batch, timesteps, ...)`.\n ", - "arguments": null, - "abstract": true, - "outputs": [], - "inputs": null, - "file": "tensorflow/python/keras/layers/cudnn_recurrent.py", + "file": "keras/layers/convolutional.py", "aliases": [] }, { "name": "AGNNConv", "base": "MessagePassing", - "docstring": "\n An Attention-based Graph Neural Network (AGNN) from the paper\n\n > [Attention-based Graph Neural Network for Semi-supervised Learning](https://arxiv.org/abs/1803.03735)
\n > Kiran K. Thekumparampil et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\X' = \\P\\X\n $$\n where\n $$\n \\P_{ij} = \\frac{\n \\exp \\left( \\beta \\cos \\left( \\x_i, \\x_j \\right) \\right)\n }{\n \\sum\\limits_{k \\in \\mathcal{N}(i) \\cup \\{ i \\}}\n \\exp \\left( \\beta \\cos \\left( \\x_i, \\x_k \\right) \\right)\n }\n $$\n and \\(\\beta\\) is a trainable parameter.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input.\n\n **Arguments**\n\n - `trainable`: boolean, if True, then beta is a trainable parameter.\n Otherwise, beta is fixed to 1;\n - `activation`: activation function;\n ", + "docstring": "\n An Attention-based Graph Neural Network (AGNN) from the paper\n\n > [Attention-based Graph Neural Network for Semi-supervised Learning](https://arxiv.org/abs/1803.03735)
\n > Kiran K. Thekumparampil et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\X' = \\P\\X\n $$\n where\n $$\n \\P_{ij} = \\frac{\n \\exp \\left( \\beta \\cos \\left( \\x_i, \\x_j \\right) \\right)\n }{\n \\sum\\limits_{k \\in \\mathcal{N}(i) \\cup \\{ i \\}}\n \\exp \\left( \\beta \\cos \\left( \\x_i, \\x_k \\right) \\right)\n }\n $$\n and \\(\\beta\\) is a trainable parameter.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input.\n\n **Arguments**\n\n - `trainable`: boolean, if True, then beta is a trainable parameter.\n Otherwise, beta is fixed to 1;\n - `activation`: activation function;\n ", "arguments": [ { "name": "self", @@ -4605,6 +5581,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/convolutional/appnp_conv.py", @@ -4692,6 +5672,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/convolutional/arma_conv.py", @@ -4762,6 +5746,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/convolutional/cheb_conv.py", @@ -4770,7 +5758,7 @@ { "name": "CrystalConv", "base": "MessagePassing", - "docstring": "\n A crystal graph convolutional layer from the paper\n\n > [Crystal Graph Convolutional Neural Networks for an Accurate and\n Interpretable Prediction of Material Properties](https://arxiv.org/abs/1710.10324)
\n > Tian Xie and Jeffrey C. Grossman\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\x_i' = \\x_i + \\sum\\limits_{j \\in \\mathcal{N}(i)} \\sigma \\left( \\z_{ij}\n \\W^{(f)} + \\b^{(f)} \\right) \\odot \\g \\left( \\z_{ij} \\W^{(s)} + \\b^{(s)}\n \\right)\n $$\n where \\(\\z_{ij} = \\X_i \\| \\X_j \\| \\E_{ij} \\), \\(\\sigma\\) is a sigmoid\n activation, and \\(g\\) is the activation function (defined by the `activation`\n argument).\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n - Edge features of shape `(num_edges, n_edge_features)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "docstring": "\n A crystal graph convolutional layer from the paper\n\n > [Crystal Graph Convolutional Neural Networks for an Accurate and\n Interpretable Prediction of Material Properties](https://arxiv.org/abs/1710.10324)
\n > Tian Xie and Jeffrey C. Grossman\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\x_i' = \\x_i + \\sum\\limits_{j \\in \\mathcal{N}(i)} \\sigma \\left( \\z_{ij}\n \\W^{(f)} + \\b^{(f)} \\right) \\odot \\g \\left( \\z_{ij} \\W^{(s)} + \\b^{(s)}\n \\right)\n $$\n where \\(\\z_{ij} = \\x_i \\| \\x_j \\| \\e_{ji} \\), \\(\\sigma\\) is a sigmoid\n activation, and \\(g\\) is the activation function (defined by the `activation`\n argument).\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n - Edge features of shape `(num_edges, n_edge_features)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", "arguments": [ { "name": "self", @@ -4840,7 +5828,7 @@ { "name": "DiffPool", "base": "Pool", - "docstring": "\n A DiffPool layer from the paper\n\n > [Hierarchical Graph Representation Learning with Differentiable Pooling](https://arxiv.org/abs/1806.08804)
\n > Rex Ying et al.\n\n **Mode**: batch.\n\n This layer computes a soft clustering \\(\\S\\) of the input graphs using a GNN,\n and reduces graphs as follows:\n $$\n \\S = \\textrm{GNN}(\\A, \\X); \\\\\n \\A' = \\S^\\top \\A \\S; \\X' = \\S^\\top \\X;\n $$\n\n where GNN consists of one GraphConv layer with softmax activation.\n Two auxiliary loss terms are also added to the model: the _link prediction\n loss_\n $$\n \\big\\| \\A - \\S\\S^\\top \\big\\|_F\n $$\n and the _entropy loss_\n $$\n - \\frac{1}{N} \\sum\\limits_{i = 1}^{N} \\S \\log (\\S).\n $$\n\n The layer also applies a 1-layer GCN to the input features, and returns\n the updated graph signal (the number of output channels is controlled by\n the `channels` parameter).\n The layer can be used without a supervised loss, to compute node clustering\n simply by minimizing the two auxiliary losses.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`;\n\n **Output**\n\n - Reduced node features of shape `([batch], K, channels)`;\n - Reduced adjacency matrix of shape `([batch], K, K)`;\n - If `return_mask=True`, the soft clustering matrix of shape `([batch], n_nodes, K)`.\n\n **Arguments**\n\n - `k`: number of nodes to keep;\n - `channels`: number of output channels (if None, the number of output\n channels is assumed to be the same as the input);\n - `return_mask`: boolean, whether to return the cluster assignment matrix;\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", + "docstring": "\n A DiffPool layer from the paper\n\n > [Hierarchical Graph Representation Learning with Differentiable Pooling](https://arxiv.org/abs/1806.08804)
\n > Rex Ying et al.\n\n **Mode**: batch.\n\n This layer computes a soft clustering \\(\\S\\) of the input graphs using a GNN,\n and reduces graphs as follows:\n $$\n \\begin{align}\n \\S &= \\textrm{GNN}_{embed}(\\A, \\X); \\\\\n \\Z &= \\textrm{GNN}_{pool}(\\A, \\X); \\\\\n \\A' &= \\S^\\top \\A \\S; \\\\\n \\X' &= \\S^\\top \\Z\n \\end{align}\n $$\n where:\n $$\n \\textrm{GNN}_{\\square}(\\A, \\X) = \\D^{-1/2} \\A \\D^{-1/2} \\X \\W_{\\square}.\n $$\n The number of output channels of \\(\\textrm{GNN}_{embed}\\) is controlled by \n the `channels` parameter.\n\n Two auxiliary loss terms are also added to the model: the _link prediction\n loss_\n $$\n L_{LP} = \\big\\| \\A - \\S\\S^\\top \\big\\|_F\n $$\n and the _entropy loss_\n $$\n L_{E} - \\frac{1}{N} \\sum\\limits_{i = 1}^{N} \\S \\log (\\S).\n $$\n\n The layer can be used without a supervised loss, to compute node clustering\n simply by minimizing the two auxiliary losses.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Adjacency matrix of shape `([batch], n_nodes, n_nodes)`;\n\n **Output**\n\n - Reduced node features of shape `([batch], K, channels)`;\n - Reduced adjacency matrix of shape `([batch], K, K)`;\n - If `return_mask=True`, the soft clustering matrix of shape `([batch], n_nodes, K)`.\n\n **Arguments**\n\n - `k`: number of output nodes;\n - `channels`: number of output channels (if None, the number of output\n channels is assumed to be the same as the input);\n - `return_mask`: boolean, whether to return the cluster assignment matrix;\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", "arguments": [ { "name": "self", @@ -4886,6 +5874,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/pooling/diff_pool.py", @@ -4894,7 +5886,7 @@ { "name": "DiffusionConv", "base": 
"Conv", - "docstring": "\n A diffusion convolution operator from the paper\n\n > [Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic\n Forecasting](https://arxiv.org/abs/1707.01926)
\n > Yaguang Li et al.\n\n **Mode**: single, disjoint, mixed, batch.\n\n **This layer expects a dense adjacency matrix.**\n\n Given a number of diffusion steps \\(K\\) and a row-normalized adjacency\n matrix \\(\\hat \\A \\), this layer calculates the \\(q\\)-th channel as:\n $$\n \\mathbf{X}_{~:,~q}' = \\sigma\\left( \\sum_{f=1}^{F} \\left( \\sum_{k=0}^{K-1}\n \\theta_k {\\hat \\A}^k \\right) \\X_{~:,~f} \\right)\n $$\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Normalized adjacency or attention coef. matrix \\(\\hat \\A \\) of shape\n `([batch], n_nodes, n_nodes)`; Use `DiffusionConvolution.preprocess` to normalize.\n\n **Output**\n\n - Node features with the same shape as the input, but with the last\n dimension changed to `channels`.\n\n **Arguments**\n\n - `channels`: number of output channels;\n - `K`: number of diffusion steps.\n - `activation`: activation function \\(\\sigma\\); (\\(\\tanh\\) by default)\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", + "docstring": "\n A diffusion convolution operator from the paper\n\n > [Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic\n Forecasting](https://arxiv.org/abs/1707.01926)
\n > Yaguang Li et al.\n\n **Mode**: single, disjoint, mixed, batch.\n\n **This layer expects a dense adjacency matrix.**\n\n Given a number of diffusion steps \\(K\\) and a row-normalized adjacency\n matrix \\(\\hat \\A \\), this layer calculates the \\(q\\)-th channel as:\n $$\n \\mathbf{X}_{~:,~q}' = \\sigma\\left( \\sum_{f=1}^{F} \\left( \\sum_{k=0}^{K-1}\n \\theta_k {\\hat \\A}^k \\right) \\X_{~:,~f} \\right)\n $$\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Normalized adjacency or attention coef. matrix \\(\\hat \\A \\) of shape\n `([batch], n_nodes, n_nodes)`; Use `DiffusionConvolution.preprocess` to normalize.\n\n **Output**\n\n - Node features with the same shape as the input, but with the last\n dimension changed to `channels`.\n\n **Arguments**\n\n - `channels`: number of output channels;\n - `K`: number of diffusion steps.\n - `activation`: activation function \\(\\sigma\\); (\\(\\tanh\\) by default)\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", "arguments": [ { "name": "self", @@ -4935,6 +5927,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/convolutional/diffusion_conv.py", @@ -4968,7 +5964,7 @@ { "name": "ECCConv", "base": "Conv", - "docstring": "\n An edge-conditioned convolutional layer (ECC) from the paper\n\n > [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on\n Graphs](https://arxiv.org/abs/1704.02901)
\n > Martin Simonovsky and Nikos Komodakis\n\n **Mode**: single, disjoint, batch.\n\n This layer computes:\n $$\n \\x_i' = \\x_{i} \\W_{\\textrm{root}} + \\sum\\limits_{j \\in \\mathcal{N}(i)}\n \\x_{j} \\textrm{MLP}(\\e_{j \\rightarrow i}) + \\b\n $$\n where \\(\\textrm{MLP}\\) is a multi-layer perceptron that outputs an\n edge-specific weight as a function of edge attributes.\n\n **Note:** In single mode, if the adjacency matrix is dense it will be\n converted to a SparseTensor automatically (which is an expensive operation).\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrices of shape `([batch], n_nodes, n_nodes)`;\n - Edge features. In single mode, shape `(num_edges, n_edge_features)`; in batch mode, shape\n `(batch, n_nodes, n_nodes, n_edge_features)`.\n\n **Output**\n\n - node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `kernel_network`: a list of integers representing the hidden neurons of\n the kernel-generating network;\n - 'root': if False, the layer will not consider the root node for computing\n the message passing (first term in equation above), but only the neighbours.\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n\n ", + "docstring": "\n An edge-conditioned convolutional layer (ECC) from the paper\n\n > [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on\n 
Graphs](https://arxiv.org/abs/1704.02901)
\n > Martin Simonovsky and Nikos Komodakis\n\n **Mode**: single, disjoint, batch, mixed.\n\n **In single, disjoint, and mixed mode, this layer expects a sparse adjacency\n matrix. If a dense adjacency is given as input, it will be automatically\n cast to sparse, which might be expensive.**\n\n This layer computes:\n $$\n \\x_i' = \\x_{i} \\W_{\\textrm{root}} + \\sum\\limits_{j \\in \\mathcal{N}(i)}\n \\x_{j} \\textrm{MLP}(\\e_{j \\rightarrow i}) + \\b\n $$\n where \\(\\textrm{MLP}\\) is a multi-layer perceptron that outputs an\n edge-specific weight as a function of edge attributes.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrices of shape `([batch], n_nodes, n_nodes)`;\n - Edge features. In single mode, shape `(num_edges, n_edge_features)`; in\n batch mode, shape `(batch, n_nodes, n_nodes, n_edge_features)`.\n\n **Output**\n\n - node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `kernel_network`: a list of integers representing the hidden neurons of\n the kernel-generating network;\n - 'root': if False, the layer will not consider the root node for computing\n the message passing (first term in equation above), but only the neighbours.\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n\n ", "arguments": [ { "name": "self", @@ -5035,6 +6031,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + 
"default": "None" } ], "file": "spektral/layers/convolutional/ecc_conv.py", @@ -5043,7 +6043,7 @@ { "name": "EdgeConv", "base": "MessagePassing", - "docstring": "\n An edge convolutional layer from the paper\n\n > [Dynamic Graph CNN for Learning on Point Clouds](https://arxiv.org/abs/1801.07829)
\n > Yue Wang et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes for each node \\(i\\):\n $$\n \\x_i' = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\textrm{MLP}\\big( \\x_i \\|\n \\x_j - \\x_i \\big)\n $$\n where \\(\\textrm{MLP}\\) is a multi-layer perceptron.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `mlp_hidden`: list of integers, number of hidden units for each hidden\n layer in the MLP (if None, the MLP has only the output layer);\n - `mlp_activation`: activation for the MLP layers;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "docstring": "\n An edge convolutional layer from the paper\n\n > [Dynamic Graph CNN for Learning on Point Clouds](https://arxiv.org/abs/1801.07829)
\n > Yue Wang et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes for each node \\(i\\):\n $$\n \\x_i' = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\textrm{MLP}\\big( \\x_i \\|\n \\x_j - \\x_i \\big)\n $$\n where \\(\\textrm{MLP}\\) is a multi-layer perceptron.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `mlp_hidden`: list of integers, number of hidden units for each hidden\n layer in the MLP (if None, the MLP has only the output layer);\n - `mlp_activation`: activation for the MLP layers;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", "arguments": [ { "name": "self", @@ -5121,7 +6121,7 @@ { "name": "GATConv", "base": "Conv", - "docstring": "\n A Graph Attention layer (GAT) from the paper\n\n > [Graph Attention Networks](https://arxiv.org/abs/1710.10903)
\n > Petar Veli\u010dkovi\u0107 et al.\n\n **Mode**: single, disjoint, mixed, batch.\n\n **This layer expects dense inputs when working in batch mode.**\n\n This layer computes a convolution similar to `layers.GraphConv`, but\n uses the attention mechanism to weight the adjacency matrix instead of\n using the normalized Laplacian:\n $$\n \\X' = \\mathbf{\\alpha}\\X\\W + \\b\n $$\n where\n $$\n \\mathbf{\\alpha}_{ij} =\\frac{ \\exp\\left(\\mathrm{LeakyReLU}\\left(\n \\a^{\\top} [(\\X\\W)_i \\, \\| \\, (\\X\\W)_j]\\right)\\right)}{\\sum\\limits_{k\n \\in \\mathcal{N}(i) \\cup \\{ i \\}} \\exp\\left(\\mathrm{LeakyReLU}\\left(\n \\a^{\\top} [(\\X\\W)_i \\, \\| \\, (\\X\\W)_k]\\right)\\right)}\n $$\n where \\(\\a \\in \\mathbb{R}^{2F'}\\) is a trainable attention kernel.\n Dropout is also applied to \\(\\alpha\\) before computing \\(\\Z\\).\n Parallel attention heads are computed in parallel and their results are\n aggregated by concatenation or average.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`;\n\n **Output**\n\n - Node features with the same shape as the input, but with the last\n dimension changed to `channels`;\n - if `return_attn_coef=True`, a list with the attention coefficients for\n each attention head. 
Each attention coefficient matrix has shape\n `([batch], n_nodes, n_nodes)`.\n\n **Arguments**\n\n - `channels`: number of output channels;\n - `attn_heads`: number of attention heads to use;\n - `concat_heads`: bool, whether to concatenate the output of the attention\n heads instead of averaging;\n - `dropout_rate`: internal dropout rate for attention coefficients;\n - `return_attn_coef`: if True, return the attention coefficients for\n the given input (one n_nodes x n_nodes matrix for each head).\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `attn_kernel_initializer`: initializer for the attention weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `attn_kernel_regularizer`: regularization applied to the attention kernels;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `attn_kernel_constraint`: constraint applied to the attention kernels;\n - `bias_constraint`: constraint applied to the bias vector.\n\n ", + "docstring": "\n A Graph Attention layer (GAT) from the paper\n\n > [Graph Attention Networks](https://arxiv.org/abs/1710.10903)
\n > Petar Veli\u010dkovi\u0107 et al.\n\n **Mode**: single, disjoint, mixed, batch.\n\n **This layer expects dense inputs when working in batch mode.**\n\n This layer computes a convolution similar to `layers.GraphConv`, but\n uses the attention mechanism to weight the adjacency matrix instead of\n using the normalized Laplacian:\n $$\n \\X' = \\mathbf{\\alpha}\\X\\W + \\b\n $$\n where\n $$\n \\mathbf{\\alpha}_{ij} =\\frac{ \\exp\\left(\\mathrm{LeakyReLU}\\left(\n \\a^{\\top} [(\\X\\W)_i \\, \\| \\, (\\X\\W)_j]\\right)\\right)}{\\sum\\limits_{k\n \\in \\mathcal{N}(i) \\cup \\{ i \\}} \\exp\\left(\\mathrm{LeakyReLU}\\left(\n \\a^{\\top} [(\\X\\W)_i \\, \\| \\, (\\X\\W)_k]\\right)\\right)}\n $$\n where \\(\\a \\in \\mathbb{R}^{2F'}\\) is a trainable attention kernel.\n Dropout is also applied to \\(\\alpha\\) before computing \\(\\Z\\).\n Parallel attention heads are computed in parallel and their results are\n aggregated by concatenation or average.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `([batch], n_nodes, n_nodes)`;\n\n **Output**\n\n - Node features with the same shape as the input, but with the last\n dimension changed to `channels`;\n - if `return_attn_coef=True`, a list with the attention coefficients for\n each attention head. 
Each attention coefficient matrix has shape\n `([batch], n_nodes, n_nodes)`.\n\n **Arguments**\n\n - `channels`: number of output channels;\n - `attn_heads`: number of attention heads to use;\n - `concat_heads`: bool, whether to concatenate the output of the attention\n heads instead of averaging;\n - `dropout_rate`: internal dropout rate for attention coefficients;\n - `return_attn_coef`: if True, return the attention coefficients for\n the given input (one n_nodes x n_nodes matrix for each head).\n - `add_self_loops`: if True, add self loops to the adjacency matrix.\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `attn_kernel_initializer`: initializer for the attention weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `attn_kernel_regularizer`: regularization applied to the attention kernels;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `attn_kernel_constraint`: constraint applied to the attention kernels;\n - `bias_constraint`: constraint applied to the bias vector.\n\n ", "arguments": [ { "name": "self", @@ -5149,6 +6149,11 @@ "default": "False", "type": "boolean" }, + { + "name": "add_self_loops", + "default": "True", + "type": "boolean" + }, { "name": "activation", "default": "None" @@ -5209,6 +6214,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/convolutional/gat_conv.py", @@ -5275,6 +6284,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/convolutional/gcn_conv.py", @@ -5341,6 +6354,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": 
"spektral/layers/convolutional/gcs_conv.py", @@ -5349,7 +6366,7 @@ { "name": "GINConv", "base": "MessagePassing", - "docstring": "\n A Graph Isomorphism Network (GIN) from the paper\n\n > [How Powerful are Graph Neural Networks?](https://arxiv.org/abs/1810.00826)
\n > Keyulu Xu et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes for each node \\(i\\):\n $$\n \\x_i' = \\textrm{MLP}\\big( (1 + \\epsilon) \\cdot \\x_i + \\sum\\limits_{j\n \\in \\mathcal{N}(i)} \\x_j \\big)\n $$\n where \\(\\textrm{MLP}\\) is a multi-layer perceptron.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `epsilon`: unnamed parameter, see the original paper and the equation\n above.\n By setting `epsilon=None`, the parameter will be learned (default behaviour).\n If given as a value, the parameter will stay fixed.\n - `mlp_hidden`: list of integers, number of hidden units for each hidden\n layer in the MLP (if None, the MLP has only the output layer);\n - `mlp_activation`: activation for the MLP layers;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "docstring": "\n A Graph Isomorphism Network (GIN) from the paper\n\n > [How Powerful are Graph Neural Networks?](https://arxiv.org/abs/1810.00826)
\n > Keyulu Xu et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes for each node \\(i\\):\n $$\n \\x_i' = \\textrm{MLP}\\big( (1 + \\epsilon) \\cdot \\x_i + \\sum\\limits_{j\n \\in \\mathcal{N}(i)} \\x_j \\big)\n $$\n where \\(\\textrm{MLP}\\) is a multi-layer perceptron.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `epsilon`: unnamed parameter, see the original paper and the equation\n above.\n By setting `epsilon=None`, the parameter will be learned (default behaviour).\n If given as a value, the parameter will stay fixed.\n - `mlp_hidden`: list of integers, number of hidden units for each hidden\n layer in the MLP (if None, the MLP has only the output layer);\n - `mlp_activation`: activation for the MLP layers;\n - `mlp_batchnorm`: apply batch normalization after every hidden layer of the MLP;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", "arguments": [ { "name": "self", @@ -5371,6 +6388,11 @@ "name": "mlp_activation", "default": "relu" }, + { + "name": "mlp_batchnorm", + "default": "True", + "type": "boolean" + }, { "name": "aggregate", "default": "sum" @@ -5431,7 +6453,7 @@ { "name": "GatedGraphConv", "base": "MessagePassing", - 
"docstring": "\n A gated graph convolutional layer from the paper\n\n > [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493)
\n > Yujia Li et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes \\(\\x_i' = \\h^{(L)}_i\\) where:\n $$\n \\begin{align}\n & \\h^{(0)}_i = \\x_i \\| \\mathbf{0} \\\\\n & \\m^{(l)}_i = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\h^{(l - 1)}_j \\W \\\\\n & \\h^{(l)}_i = \\textrm{GRU} \\left(\\m^{(l)}_i, \\h^{(l - 1)}_i \\right) \\\\\n \\end{align}\n $$\n where \\(\\textrm{GRU}\\) is a gated recurrent unit cell.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`; note that `n_node_features` must be smaller or equal\n than `channels`.\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `n_layers`: integer, number of iterations with the GRU cell;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "docstring": "\n A gated graph convolutional layer from the paper\n\n > [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493)
\n > Yujia Li et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes \\(\\x_i' = \\h^{(L)}_i\\) where:\n $$\n \\begin{align}\n & \\h^{(0)}_i = \\x_i \\| \\mathbf{0} \\\\\n & \\m^{(l)}_i = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\h^{(l - 1)}_j \\W \\\\\n & \\h^{(l)}_i = \\textrm{GRU} \\left(\\m^{(l)}_i, \\h^{(l - 1)}_i \\right) \\\\\n \\end{align}\n $$\n where \\(\\textrm{GRU}\\) is a gated recurrent unit cell.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`; note that\n `n_node_features` must be smaller or equal than `channels`.\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `n_layers`: integer, number of iterations with the GRU cell;\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", "arguments": [ { "name": "self", @@ -5501,7 +6523,7 @@ { "name": "GeneralConv", "base": "MessagePassing", - "docstring": "\n A general convolutional layer from the paper\n\n > [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)
\n > Jiaxuan You et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\x_i' = \\mathrm{Agg} \\left( \\left\\{ \\mathrm{Act} \\left( \\mathrm{Dropout}\n \\left( \\mathrm{BN} \\left( \\x_j \\W + \\b \\right) \\right) \\right),\n j \\in \\mathcal{N}(i) \\right\\} \\right)\n $$\n\n where \\( \\mathrm{Agg} \\) is an aggregation function for the messages,\n \\( \\mathrm{Act} \\) is an activation function, \\( \\mathrm{Dropout} \\)\n applies dropout to the node features, and \\( \\mathrm{BN} \\) applies batch\n normalization to the node features.\n\n This layer supports the PReLU activation via the 'prelu' keyword.\n\n The default parameters of this layer are selected according to the best\n results obtained in the paper, and should provide a good performance on\n many node-level and graph-level tasks, without modifications.\n The defaults are as follows:\n\n - 256 channels\n - Batch normalization\n - No dropout\n - PReLU activation\n - Sum aggregation\n\n If you are uncertain about which layers to use for your GNN, this is a\n safe choice. Check out the original paper for more specific configurations.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `batch_norm`: bool, whether to use batch normalization;\n - `dropout`: float, dropout rate;\n - `aggregate`: string or callable, an aggregation function. Supported\n aggregations: 'sum', 'mean', 'max', 'min', 'prod'.\n - `activation`: activation function. 
This layer also supports the\n advanced activation PReLU by passing `activation='prelu'`.\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "docstring": "\n A general convolutional layer from the paper\n\n > [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)
\n > Jiaxuan You et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\x_i' = \\mathrm{Agg} \\left( \\left\\{ \\mathrm{Act} \\left( \\mathrm{Dropout}\n \\left( \\mathrm{BN} \\left( \\x_j \\W + \\b \\right) \\right) \\right),\n j \\in \\mathcal{N}(i) \\right\\} \\right)\n $$\n\n where \\( \\mathrm{Agg} \\) is an aggregation function for the messages,\n \\( \\mathrm{Act} \\) is an activation function, \\( \\mathrm{Dropout} \\)\n applies dropout to the node features, and \\( \\mathrm{BN} \\) applies batch\n normalization to the node features.\n\n This layer supports the PReLU activation via the 'prelu' keyword.\n\n The default parameters of this layer are selected according to the best\n results obtained in the paper, and should provide a good performance on\n many node-level and graph-level tasks, without modifications.\n The defaults are as follows:\n\n - 256 channels\n - Batch normalization\n - No dropout\n - PReLU activation\n - Sum aggregation\n\n If you are uncertain about which layers to use for your GNN, this is a\n safe choice. Check out the original paper for more specific configurations.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `batch_norm`: bool, whether to use batch normalization;\n - `dropout`: float, dropout rate;\n - `aggregate`: string or callable, an aggregation function. Supported\n aggregations: 'sum', 'mean', 'max', 'min', 'prod'.\n - `activation`: activation function. 
This layer also supports the\n advanced activation PReLU by passing `activation='prelu'`.\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", "arguments": [ { "name": "self", @@ -5742,10 +6764,53 @@ "file": "spektral/layers/pooling/global_pool.py", "aliases": [] }, + { + "name": "GraphMasking", + "base": "Layer", + "docstring": "\n A layer that starts the propagation of masks in a model.\n\n This layer assumes that the node features given as input have been extended with a\n binary mask that indicates which nodes are valid in each graph.\n The layer is useful when using a `data.BatchLoader` with `mask=True` or in general\n when zero-padding graphs so that all batches have the same size. 
The binary mask\n indicates with a 1 those nodes that should be taken into account by the model.\n\n The layer will remove the rightmost feature from the nodes and start a mask\n propagation to all subsequent layers:\n\n ```python\n print(x.shape) # shape (batch, n_nodes, n_node_features + 1)\n mask = x[..., -1:] # shape (batch, n_nodes, 1)\n x_new = x[..., :-1] # shape (batch, n_nodes, n_node_features)\n ```\n\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "trainable", + "default": "True", + "type": "boolean" + }, + { + "name": "name", + "default": "None" + }, + { + "name": "dtype", + "default": "None" + }, + { + "name": "dynamic", + "default": "False", + "type": "boolean" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "spektral/layers/base.py", + "aliases": [] + }, { "name": "GraphSageConv", "base": "MessagePassing", - "docstring": "\n A GraphSAGE layer from the paper\n\n > [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216)
\n > William L. Hamilton et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\X' = \\big[ \\textrm{AGGREGATE}(\\X) \\| \\X \\big] \\W + \\b; \\\\\n \\X' = \\frac{\\X'}{\\|\\X'\\|}\n $$\n where \\( \\textrm{AGGREGATE} \\) is a function to aggregate a node's\n neighbourhood. The supported aggregation methods are: sum, mean,\n max, min, and product.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape as the input, but with the last\n dimension changed to `channels`.\n\n **Arguments**\n\n - `channels`: number of output channels;\n - `aggregate_op`: str, aggregation method to use (`'sum'`, `'mean'`,\n `'max'`, `'min'`, `'prod'`);\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n\n ", + "docstring": "\n A GraphSAGE layer from the paper\n\n > [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216)
\n > William L. Hamilton et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\X' = \\big[ \\textrm{AGGREGATE}(\\X) \\| \\X \\big] \\W + \\b; \\\\\n \\X' = \\frac{\\X'}{\\|\\X'\\|}\n $$\n where \\( \\textrm{AGGREGATE} \\) is a function to aggregate a node's\n neighbourhood. The supported aggregation methods are: sum, mean,\n max, min, and product.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape as the input, but with the last\n dimension changed to `channels`.\n\n **Arguments**\n\n - `channels`: number of output channels;\n - `aggregate_op`: str, aggregation method to use (`'sum'`, `'mean'`,\n `'max'`, `'min'`, `'prod'`);\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n\n ", "arguments": [ { "name": "self", @@ -5815,7 +6880,7 @@ { "name": "InnerProduct", "base": "Layer", - "docstring": "\n Computes the inner product between elements of a 2d Tensor:\n $$\n \\langle \\x, \\x \\rangle = \\x\\x^\\top.\n $$\n\n **Mode**: single.\n\n **Input**\n\n - Tensor of shape `(N, M)`;\n\n **Output**\n\n - Tensor of shape `(N, N)`.\n\n :param trainable_kernel: add a trainable square matrix between the inner\n product (e.g., `X @ W @ X.T`);\n :param activation: activation function;\n :param kernel_initializer: initializer for the weights;\n :param kernel_regularizer: regularization applied to the 
kernel;\n :param kernel_constraint: constraint applied to the kernel;\n ", + "docstring": "\n Computes the inner product between elements of a 2d Tensor:\n $$\n \\langle \\x, \\x \\rangle = \\x\\x^\\top.\n $$\n\n **Mode**: single.\n\n **Input**\n\n - Tensor of shape `(n_nodes, n_features)`;\n\n **Output**\n\n - Tensor of shape `(n_nodes, n_nodes)`.\n\n :param trainable_kernel: add a trainable square matrix between the inner\n product (e.g., `X @ W @ X.T`);\n :param activation: activation function;\n :param kernel_initializer: initializer for the weights;\n :param kernel_regularizer: regularization applied to the kernel;\n :param kernel_constraint: constraint applied to the kernel;\n ", "arguments": [ { "name": "self", @@ -5861,7 +6926,7 @@ { "name": "MessagePassing", "base": "Layer", - "docstring": "\n A general class for message passing networks from the paper\n\n > [Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212)
\n > Justin Gilmer et al.\n\n **Mode**: single, disjoint.\n\n **This layer and all of its extensions expect a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\x_i' = \\gamma \\left( \\x_i, \\square_{j \\in \\mathcal{N}(i)} \\,\n \\phi \\left(\\x_i, \\x_j, \\e_{j \\rightarrow i} \\right) \\right),\n $$\n \n where \\( \\gamma \\) is a differentiable update function, \\( \\phi \\) is a\n differentiable message function, \\( \\square \\) is a permutation-invariant\n function to aggregate the messages (like the sum or the average), and\n \\(\\E_{ij}\\) is the edge attribute of edge i-j.\n\n By extending this class, it is possible to create any message-passing layer\n in single/disjoint mode.\n\n **API**\n\n ```python\n propagate(x, a, e=None, **kwargs)\n ```\n Propagates the messages and computes embeddings for each node in the graph.
\n Any `kwargs` will be forwarded as keyword arguments to `message()`,\n `aggregate()` and `update()`.\n\n ```python\n message(x, **kwargs)\n ```\n Computes messages, equivalent to \\(\\phi\\) in the definition.
\n Any extra keyword argument of this function will be populated by\n `propagate()` if a matching keyword is found.
\n Use `self.get_i()` and `self.get_j()` to gather the elements using the\n indices `i` or `j` of the adjacency matrix. Equivalently, you can access\n the indices themselves via the `index_i` and `index_j` attributes.\n\n ```python\n aggregate(messages, **kwargs)\n ```\n Aggregates the messages, equivalent to \\(\\square\\) in the definition.
\n The behaviour of this function can also be controlled using the `aggregate`\n keyword in the constructor of the layer (supported aggregations: sum, mean,\n max, min, prod).
\n Any extra keyword argument of this function will be populated by\n `propagate()` if a matching keyword is found.\n\n ```python\n update(embeddings, **kwargs)\n ```\n Updates the aggregated messages to obtain the final node embeddings,\n equivalent to \\(\\gamma\\) in the definition.
\n Any extra keyword argument of this function will be populated by\n `propagate()` if a matching keyword is found.\n\n **Arguments**:\n\n - `aggregate`: string or callable, an aggregation function. This flag can be\n used to control the behaviour of `aggregate()` wihtout re-implementing it.\n Supported aggregations: 'sum', 'mean', 'max', 'min', 'prod'.\n If callable, the function must have the signature `foo(updates, indices, n_nodes)`\n and return a rank 2 tensor with shape `(n_nodes, ...)`.\n - `kwargs`: additional keyword arguments specific to Keras' Layers, like\n regularizers, initializers, constraints, etc.\n ", + "docstring": "\n A general class for message passing networks from the paper\n\n > [Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212)
\n > Justin Gilmer et al.\n\n **Mode**: single, disjoint.\n\n **This layer and all of its extensions expect a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\x_i' = \\gamma \\left( \\x_i, \\square_{j \\in \\mathcal{N}(i)} \\,\n \\phi \\left(\\x_i, \\x_j, \\e_{j \\rightarrow i} \\right) \\right),\n $$\n\n where \\( \\gamma \\) is a differentiable update function, \\( \\phi \\) is a\n differentiable message function, \\( \\square \\) is a permutation-invariant\n function to aggregate the messages (like the sum or the average), and\n \\(\\E_{ij}\\) is the edge attribute of edge i-j.\n\n By extending this class, it is possible to create any message-passing layer\n in single/disjoint mode.\n\n **API**\n\n ```python\n propagate(x, a, e=None, **kwargs)\n ```\n Propagates the messages and computes embeddings for each node in the graph.
\n Any `kwargs` will be forwarded as keyword arguments to `message()`,\n `aggregate()` and `update()`.\n\n ```python\n message(x, **kwargs)\n ```\n Computes messages, equivalent to \\(\\phi\\) in the definition.
\n Any extra keyword argument of this function will be populated by\n `propagate()` if a matching keyword is found.
\n Use `self.get_i()` and `self.get_j()` to gather the elements using the\n indices `i` or `j` of the adjacency matrix. Equivalently, you can access\n the indices themselves via the `index_i` and `index_j` attributes.\n\n ```python\n aggregate(messages, **kwargs)\n ```\n Aggregates the messages, equivalent to \\(\\square\\) in the definition.
\n The behaviour of this function can also be controlled using the `aggregate`\n keyword in the constructor of the layer (supported aggregations: sum, mean,\n max, min, prod).
\n Any extra keyword argument of this function will be populated by\n `propagate()` if a matching keyword is found.\n\n ```python\n update(embeddings, **kwargs)\n ```\n Updates the aggregated messages to obtain the final node embeddings,\n equivalent to \\(\\gamma\\) in the definition.
\n Any extra keyword argument of this function will be populated by\n `propagate()` if a matching keyword is found.\n\n **Arguments**:\n\n - `aggregate`: string or callable, an aggregation function. This flag can be\n used to control the behaviour of `aggregate()` wihtout re-implementing it.\n Supported aggregations: 'sum', 'mean', 'max', 'min', 'prod'.\n If callable, the function must have the signature `foo(updates, indices, n_nodes)`\n and return a rank 2 tensor with shape `(n_nodes, ...)`.\n - `kwargs`: additional keyword arguments specific to Keras' Layers, like\n regularizers, initializers, constraints, etc.\n ", "arguments": [ { "name": "self", @@ -5890,7 +6955,7 @@ { "name": "MinCutPool", "base": "Pool", - "docstring": "\n A MinCut pooling layer from the paper\n\n > [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481)
\n > Filippo Maria Bianchi et al.\n\n **Mode**: batch.\n\n This layer computes a soft clustering \\(\\S\\) of the input graphs using a MLP,\n and reduces graphs as follows:\n $$\n \\S = \\textrm{MLP}(\\X); \\\\\n \\A' = \\S^\\top \\A \\S; \\X' = \\S^\\top \\X;\n $$\n where MLP is a multi-layer perceptron with softmax output.\n Two auxiliary loss terms are also added to the model: the _minCUT loss_\n $$\n - \\frac{ \\mathrm{Tr}(\\S^\\top \\A \\S) }{ \\mathrm{Tr}(\\S^\\top \\D \\S) }\n $$\n and the _orthogonality loss_\n $$\n \\left\\|\n \\frac{\\S^\\top \\S}{\\| \\S^\\top \\S \\|_F}\n - \\frac{\\I_K}{\\sqrt{K}}\n \\right\\|_F.\n $$\n\n The layer can be used without a supervised loss, to compute node clustering\n simply by minimizing the two auxiliary losses.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Symmetrically normalized adjacency matrix of shape `([batch], n_nodes, n_nodes)`;\n\n **Output**\n\n - Reduced node features of shape `([batch], K, n_node_features)`;\n - Reduced adjacency matrix of shape `([batch], K, K)`;\n - If `return_mask=True`, the soft clustering matrix of shape `([batch], n_nodes, K)`.\n\n **Arguments**\n\n - `k`: number of nodes to keep;\n - `mlp_hidden`: list of integers, number of hidden units for each hidden\n layer in the MLP used to compute cluster assignments (if None, the MLP has\n only the output layer);\n - `mlp_activation`: activation for the MLP layers;\n - `return_mask`: boolean, whether to return the cluster assignment matrix;\n - `use_bias`: use bias in the MLP;\n - `kernel_initializer`: initializer for the weights of the MLP;\n - `bias_initializer`: initializer for the bias of the MLP;\n - `kernel_regularizer`: regularization applied to the weights of the MLP;\n - `bias_regularizer`: regularization applied to the bias of the MLP;\n - `kernel_constraint`: constraint applied to the weights of the MLP;\n - `bias_constraint`: constraint applied to the bias of the MLP;\n ", + "docstring": "\n 
A MinCut pooling layer from the paper\n\n > [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481)
\n > Filippo Maria Bianchi et al.\n\n **Mode**: batch.\n\n This layer computes a soft clustering \\(\\S\\) of the input graphs using a MLP,\n and reduces graphs as follows:\n $$\n \\begin{align}\n \\S &= \\textrm{MLP}(\\X); \\\\\n \\A' &= \\S^\\top \\A \\S; \\\\ \n \\X' &= \\S^\\top \\X\n \\end{align}\n $$\n where MLP is a multi-layer perceptron with softmax output.\n\n Two auxiliary loss terms are also added to the model: the _minCUT loss_\n $$\n L_c = - \\frac{ \\mathrm{Tr}(\\S^\\top \\A \\S) }{ \\mathrm{Tr}(\\S^\\top \\D \\S) }\n $$\n and the _orthogonality loss_\n $$\n L_o = \\left\\|\n \\frac{\\S^\\top \\S}{\\| \\S^\\top \\S \\|_F}\n - \\frac{\\I_K}{\\sqrt{K}}\n \\right\\|_F.\n $$\n\n The layer can be used without a supervised loss, to compute node clustering\n simply by minimizing the two auxiliary losses.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Symmetrically normalized adjacency matrix of shape `([batch], n_nodes, n_nodes)`;\n\n **Output**\n\n - Reduced node features of shape `([batch], K, n_node_features)`;\n - Reduced adjacency matrix of shape `([batch], K, K)`;\n - If `return_mask=True`, the soft clustering matrix of shape `([batch], n_nodes, K)`.\n\n **Arguments**\n\n - `k`: number of output nodes;\n - `mlp_hidden`: list of integers, number of hidden units for each hidden\n layer in the MLP used to compute cluster assignments (if None, the MLP has\n only the output layer);\n - `mlp_activation`: activation for the MLP layers;\n - `return_mask`: boolean, whether to return the cluster assignment matrix;\n - `use_bias`: use bias in the MLP;\n - `kernel_initializer`: initializer for the weights of the MLP;\n - `bias_initializer`: initializer for the bias of the MLP;\n - `kernel_regularizer`: regularization applied to the weights of the MLP;\n - `bias_regularizer`: regularization applied to the bias of the MLP;\n - `kernel_constraint`: constraint applied to the weights of the MLP;\n - `bias_constraint`: constraint 
applied to the bias of the MLP;\n ", "arguments": [ { "name": "self", @@ -5957,6 +7022,10 @@ { "name": "inputs", "default": null + }, + { + "name": "mask", + "default": "None" } ], "file": "spektral/layers/pooling/mincut_pool.py", @@ -5965,16 +7034,12 @@ { "name": "MinkowskiProduct", "base": "Layer", - "docstring": "\n Computes the hyperbolic inner product between elements of a rank 2 Tensor:\n $$\n \\langle \\x, \\x \\rangle = \\x \\,\n \\begin{pmatrix}\n \\I_{d \\times d} & 0 \\\\\n 0 & -1\n \\end{pmatrix} \\, \\x^\\top.\n $$\n\n **Mode**: single.\n\n **Input**\n\n - Tensor of shape `(N, M)`;\n\n **Output**\n\n - Tensor of shape `(N, N)`.\n\n :param input_dim_1: first dimension of the input Tensor; set this if you\n encounter issues with shapes in your model, in order to provide an explicit\n output shape for your layer.\n :param activation: activation function;\n ", + "docstring": "\n Computes the hyperbolic inner product between elements of a rank 2 Tensor:\n $$\n \\langle \\x, \\x \\rangle = \\x \\,\n \\begin{pmatrix}\n \\I_{d \\times d} & 0 \\\\\n 0 & -1\n \\end{pmatrix} \\, \\x^\\top.\n $$\n\n **Mode**: single.\n\n **Input**\n\n - Tensor of shape `(n_nodes, n_features)`;\n\n **Output**\n\n - Tensor of shape `(n_nodes, n_nodes)`.\n\n :param activation: activation function;\n ", "arguments": [ { "name": "self", "default": null }, - { - "name": "input_dim_1", - "default": "None" - }, { "name": "activation", "default": "None" @@ -5998,7 +7063,7 @@ { "name": "SAGPool", "base": "TopKPool", - "docstring": "\n A self-attention graph pooling layer (SAG) from the paper\n\n > [Self-Attention Graph Pooling](https://arxiv.org/abs/1904.08082)
\n > Junhyun Lee et al.\n\n **Mode**: single, disjoint.\n\n This layer computes the following operations:\n $$\n \\y = \\textrm{GNN}(\\A, \\X); \\;\\;\\;\\;\n \\i = \\textrm{rank}(\\y, K); \\;\\;\\;\\;\n \\X' = (\\X \\odot \\textrm{tanh}(\\y))_\\i; \\;\\;\\;\\;\n \\A' = \\A_{\\i, \\i}\n $$\n\n where \\( \\textrm{rank}(\\y, K) \\) returns the indices of the top K values of\n \\(\\y\\), and \\(\\textrm{GNN}\\) consists of one GraphConv layer with no\n activation. \\(K\\) is defined for each graph as a fraction of the number of\n nodes.\n\n This layer temporarily makes the adjacency matrix dense in order to compute\n \\(\\A'\\).\n If memory is not an issue, considerable speedups can be achieved by using\n dense graphs directly.\n Converting a graph from sparse to dense and back to sparse is an expensive\n operation.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`;\n - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);\n\n **Output**\n\n - Reduced node features of shape `(ratio * n_nodes, n_node_features)`;\n - Reduced adjacency matrix of shape `(ratio * n_nodes, ratio * n_nodes)`;\n - Reduced graph IDs of shape `(ratio * n_nodes, )` (only in disjoint mode);\n - If `return_mask=True`, the binary pooling mask of shape `(ratio * n_nodes, )`.\n\n **Arguments**\n\n - `ratio`: float between 0 and 1, ratio of nodes to keep in each graph;\n - `return_mask`: boolean, whether to return the binary mask used for pooling;\n - `sigmoid_gating`: boolean, use a sigmoid gating activation instead of a\n tanh;\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", + "docstring": "\n A self-attention graph pooling layer from the paper\n\n > [Self-Attention Graph Pooling](https://arxiv.org/abs/1904.08082)
\n > Junhyun Lee et al.\n\n **Mode**: single, disjoint.\n\n This layer computes the following operations:\n $$\n \\y = \\textrm{GNN}(\\A, \\X); \\;\\;\\;\\;\n \\i = \\textrm{rank}(\\y, K); \\;\\;\\;\\;\n \\X' = (\\X \\odot \\textrm{tanh}(\\y))_\\i; \\;\\;\\;\\;\n \\A' = \\A_{\\i, \\i}\n $$\n where \\(\\textrm{rank}(\\y, K)\\) returns the indices of the top K values of\n \\(\\y\\) and\n $$\n \\textrm{GNN}(\\A, \\X) = \\A \\X \\W.\n $$\n\n \\(K\\) is defined for each graph as a fraction of the number of nodes,\n controlled by the `ratio` argument.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`;\n - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);\n\n **Output**\n\n - Reduced node features of shape `(ratio * n_nodes, n_node_features)`;\n - Reduced adjacency matrix of shape `(ratio * n_nodes, ratio * n_nodes)`;\n - Reduced graph IDs of shape `(ratio * n_nodes, )` (only in disjoint mode);\n - If `return_mask=True`, the binary pooling mask of shape `(ratio * n_nodes, )`.\n\n **Arguments**\n\n - `ratio`: float between 0 and 1, ratio of nodes to keep in each graph;\n - `return_mask`: boolean, whether to return the binary mask used for pooling;\n - `sigmoid_gating`: boolean, use a sigmoid gating activation instead of a\n tanh;\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", "arguments": [ { "name": "self", @@ -6049,7 +7114,7 @@ { "name": "SortPool", "base": "Layer", - "docstring": "\n A SortPool layer as described by\n [Zhang et al](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf).\n This layers takes a graph signal \\(\\mathbf{X}\\) and returns the topmost k\n rows according to the last column.\n If \\(\\mathbf{X}\\) has less than k rows, the result is zero-padded to k.\n \n **Mode**: single, disjoint, batch.\n \n **Input**\n\n - Node 
features of shape `([batch], n_nodes, n_node_features)`;\n - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);\n\n **Output**\n\n - Pooled node features of shape `(batch, k, n_node_features)` (if single mode, shape will\n be `(1, k, n_node_features)`).\n\n **Arguments**\n\n - `k`: integer, number of nodes to keep;\n ", + "docstring": "\n A SortPool layer as described by\n [Zhang et al](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf).\n This layers takes a graph signal \\(\\mathbf{X}\\) and returns the topmost k\n rows according to the last column.\n If \\(\\mathbf{X}\\) has less than k rows, the result is zero-padded to k.\n\n **Mode**: single, disjoint, batch.\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);\n\n **Output**\n\n - Pooled node features of shape `(batch, k, n_node_features)` (if single mode, shape will\n be `(1, k, n_node_features)`).\n\n **Arguments**\n\n - `k`: integer, number of nodes to keep;\n ", "arguments": [ { "name": "self", @@ -6119,7 +7184,7 @@ { "name": "TAGConv", "base": "MessagePassing", - "docstring": "\n A Topology Adaptive Graph Convolutional layer (TAG) from the paper\n\n > [Topology Adaptive Graph Convolutional Networks](https://arxiv.org/abs/1710.10370)
\n > Jian Du et al.\n\n **Mode**: single, disjoint.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\Z = \\sum\\limits_{k=0}^{K} \\D^{-1/2}\\A^k\\D^{-1/2}\\X\\W^{(k)}\n $$\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `K`: the order of the layer (i.e., the layer will consider a K-hop\n neighbourhood for each node);\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "docstring": "\n A Topology Adaptive Graph Convolutional layer (TAG) from the paper\n\n > [Topology Adaptive Graph Convolutional Networks](https://arxiv.org/abs/1710.10370)
\n > Jian Du et al.\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n This layer computes:\n $$\n \\Z = \\sum\\limits_{k=0}^{K} \\D^{-1/2}\\A^k\\D^{-1/2}\\X\\W^{(k)}\n $$\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `channels`.\n\n **Arguments**\n\n - `channels`: integer, number of output channels;\n - `K`: the order of the layer (i.e., the layer will consider a K-hop\n neighbourhood for each node);\n - `activation`: activation function;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", "arguments": [ { "name": "self", @@ -6193,7 +7258,7 @@ { "name": "TopKPool", "base": "Pool", - "docstring": "\n A gPool/Top-K layer from the papers\n\n > [Graph U-Nets](https://arxiv.org/abs/1905.05178)
\n > Hongyang Gao and Shuiwang Ji\n\n and\n\n > [Towards Sparse Hierarchical Graph Classifiers](https://arxiv.org/abs/1811.01287)
\n > C\u0103t\u0103lina Cangea et al.\n\n **Mode**: single, disjoint.\n\n This layer computes the following operations:\n $$\n \\y = \\frac{\\X\\p}{\\|\\p\\|}; \\;\\;\\;\\;\n \\i = \\textrm{rank}(\\y, K); \\;\\;\\;\\;\n \\X' = (\\X \\odot \\textrm{tanh}(\\y))_\\i; \\;\\;\\;\\;\n \\A' = \\A_{\\i, \\i}\n $$\n\n where \\( \\textrm{rank}(\\y, K) \\) returns the indices of the top K values of\n \\(\\y\\), and \\(\\p\\) is a learnable parameter vector of size \\(F\\). \\(K\\) is\n defined for each graph as a fraction of the number of nodes.\n Note that the the gating operation \\(\\textrm{tanh}(\\y)\\) (Cangea et al.)\n can be replaced with a sigmoid (Gao & Ji).\n\n This layer temporarily makes the adjacency matrix dense in order to compute\n \\(\\A'\\).\n If memory is not an issue, considerable speedups can be achieved by using\n dense graphs directly.\n Converting a graph from sparse to dense and back to sparse is an expensive\n operation.\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`;\n - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);\n\n **Output**\n\n - Reduced node features of shape `(ratio * n_nodes, n_node_features)`;\n - Reduced adjacency matrix of shape `(ratio * n_nodes, ratio * n_nodes)`;\n - Reduced graph IDs of shape `(ratio * n_nodes, )` (only in disjoint mode);\n - If `return_mask=True`, the binary pooling mask of shape `(ratio * n_nodes, )`.\n\n **Arguments**\n\n - `ratio`: float between 0 and 1, ratio of nodes to keep in each graph;\n - `return_mask`: boolean, whether to return the binary mask used for pooling;\n - `sigmoid_gating`: boolean, use a sigmoid gating activation instead of a\n tanh;\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", + "docstring": "\n A gPool/Top-K layer from the papers\n\n > [Graph 
U-Nets](https://arxiv.org/abs/1905.05178)
\n > Hongyang Gao and Shuiwang Ji\n\n and\n\n > [Towards Sparse Hierarchical Graph Classifiers](https://arxiv.org/abs/1811.01287)
\n > C\u0103t\u0103lina Cangea et al.\n\n **Mode**: single, disjoint.\n\n This layer computes the following operations:\n $$\n \\y = \\frac{\\X\\p}{\\|\\p\\|}; \\;\\;\\;\\;\n \\i = \\textrm{rank}(\\y, K); \\;\\;\\;\\;\n \\X' = (\\X \\odot \\textrm{tanh}(\\y))_\\i; \\;\\;\\;\\;\n \\A' = \\A_{\\i, \\i}\n $$\n where \\(\\textrm{rank}(\\y, K)\\) returns the indices of the top K values of\n \\(\\y\\), and \\(\\p\\) is a learnable parameter vector of size \\(F\\).\n\n \\(K\\) is defined for each graph as a fraction of the number of nodes,\n controlled by the `ratio` argument.\n\n Note that the the gating operation \\(\\textrm{tanh}(\\y)\\) (Cangea et al.)\n can be replaced with a sigmoid (Gao & Ji).\n\n **Input**\n\n - Node features of shape `(n_nodes, n_node_features)`;\n - Binary adjacency matrix of shape `(n_nodes, n_nodes)`;\n - Graph IDs of shape `(n_nodes, )` (only in disjoint mode);\n\n **Output**\n\n - Reduced node features of shape `(ratio * n_nodes, n_node_features)`;\n - Reduced adjacency matrix of shape `(ratio * n_nodes, ratio * n_nodes)`;\n - Reduced graph IDs of shape `(ratio * n_nodes, )` (only in disjoint mode);\n - If `return_mask=True`, the binary pooling mask of shape `(ratio * n_nodes, )`.\n\n **Arguments**\n\n - `ratio`: float between 0 and 1, ratio of nodes to keep in each graph;\n - `return_mask`: boolean, whether to return the binary mask used for pooling;\n - `sigmoid_gating`: boolean, use a sigmoid gating activation instead of a\n tanh;\n - `kernel_initializer`: initializer for the weights;\n - `kernel_regularizer`: regularization applied to the weights;\n - `kernel_constraint`: constraint applied to the weights;\n ", "arguments": [ { "name": "self", @@ -6241,6 +7306,180 @@ "file": "spektral/layers/pooling/topk_pool.py", "aliases": [] }, + { + "name": "XENetConv", + "base": "MessagePassing", + "docstring": "\n A XENet convolutional layer from the paper\n\n > [XENet: Using a new graph convolution to accelerate the timeline for protein design on 
quantum computers](https://www.biorxiv.org/content/10.1101/2021.05.05.442729v1)
\n > Jack B. Maguire, Daniele Grattarola, Eugene Klyshko, Vikram Khipple Mulligan, Hans Melo\n\n **Mode**: single, disjoint, mixed.\n\n **This layer expects a sparse adjacency matrix.**\n\n For a version of this layer that supports batch mode, you can use\n `spektral.layers.XENetDenseConv` as a drop-in replacement.\n\n This layer computes for each node \\(i\\):\n $$\n \\s_{ij} = \\text{PReLU} \\left( (\\x_{i} \\| \\x_{j} \\| \\e_{ij} \\| \\e_{ji}) \\W^{(s)} + \\b^{(s)} \\right) \\\\\n \\s^{(\\text{out})}_{i} = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\s_{ij} \\\\\n \\s^{(\\text{in})}_{i} = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\s_{ji} \\\\\n \\x_{i}' = \\sigma\\left( (\\x_{i} \\| \\s^{(\\text{out})}_{i} \\| \\s^{(\\text{in})}_{i}) \\W^{(n)} + \\b^{(n)} \\right) \\\\\n \\e_{ij}' = \\sigma\\left( \\s_{ij} \\W^{(e)} + \\b^{(e)} \\right)\n $$\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrices of shape `([batch], n_nodes, n_nodes)`;\n - Edge features of shape `(num_edges, n_edge_features)`;\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `node_channels`.\n - Edge features with the same shape of the input, but the last dimension\n changed to `edge_channels`.\n\n **Arguments**\n\n - `stack_channels`: integer or list of integers, number of channels for the hidden layers;\n - `node_channels`: integer, number of output channels for the nodes;\n - `edge_channels`: integer, number of output channels for the edges;\n - `attention`: whether to use attention when aggregating the stacks;\n - `node_activation`: activation function for nodes;\n - `edge_activation`: activation function for edges;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization 
applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "stack_channels", + "default": null + }, + { + "name": "node_channels", + "default": null + }, + { + "name": "edge_channels", + "default": null + }, + { + "name": "attention", + "default": "True", + "type": "boolean" + }, + { + "name": "node_activation", + "default": "None" + }, + { + "name": "edge_activation", + "default": "None" + }, + { + "name": "aggregate", + "default": "sum" + }, + { + "name": "use_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "spektral/layers/convolutional/xenet_conv.py", + "aliases": [] + }, + { + "name": "XENetDenseConv", + "base": "Conv", + "docstring": "\n A XENet convolutional layer from the paper\n\n > [XENet: Using a new graph convolution to accelerate the timeline for protein design on quantum computers](https://www.biorxiv.org/content/10.1101/2021.05.05.442729v1)
\n > Jack B. Maguire, Daniele Grattarola, Eugene Klyshko, Vikram Khipple Mulligan, Hans Melo\n\n **Mode**: batch.\n\n **This layer expects a dense adjacency matrix.**\n\n This layer computes for each node \\(i\\):\n $$\n \\s_{ij} = \\text{PReLU} \\left( (\\x_{i} \\| \\x_{j} \\| \\e_{ij} \\| \\e_{ji}) \\W^{(s)} + \\b^{(s)} \\right) \\\\\n \\s^{(\\text{out})}_{i} = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\s_{ij} \\\\\n \\s^{(\\text{in})}_{i} = \\sum\\limits_{j \\in \\mathcal{N}(i)} \\s_{ji} \\\\\n \\x_{i}' = \\sigma\\left( (\\x_{i} \\| \\s^{(\\text{out})}_{i} \\| \\s^{(\\text{in})}_{i}) \\W^{(n)} + \\b^{(n)} \\right) \\\\\n \\e_{ij}' = \\sigma\\left( \\s_{ij} \\W^{(e)} + \\b^{(e)} \\right)\n $$\n\n **Input**\n\n - Node features of shape `([batch], n_nodes, n_node_features)`;\n - Binary adjacency matrices of shape `([batch], n_nodes, n_nodes)`;\n - Edge features of shape `(batch, n_nodes, n_nodes, n_edge_features)`.\n\n **Output**\n\n - Node features with the same shape of the input, but the last dimension\n changed to `node_channels`.\n - Edge features with the same shape of the input, but the last dimension\n changed to `edge_channels`.\n\n **Arguments**\n\n - `stack_channels`: integer or list of integers, number of channels for the hidden layers;\n - `node_channels`: integer, number of output channels for the nodes;\n - `edge_channels`: integer, number of output channels for the edges;\n - `attention`: whether to use attention when aggregating the stacks;\n - `node_activation`: activation function for nodes;\n - `edge_activation`: activation function for edges;\n - `use_bias`: bool, add a bias vector to the output;\n - `kernel_initializer`: initializer for the weights;\n - `bias_initializer`: initializer for the bias vector;\n - `kernel_regularizer`: regularization applied to the weights;\n - `bias_regularizer`: regularization applied to the bias vector;\n - `activity_regularizer`: regularization applied to the output;\n - `kernel_constraint`: constraint applied to 
the weights;\n - `bias_constraint`: constraint applied to the bias vector.\n ", + "arguments": [ + { + "name": "self", + "default": null + }, + { + "name": "stack_channels", + "default": null + }, + { + "name": "node_channels", + "default": null + }, + { + "name": "edge_channels", + "default": null + }, + { + "name": "attention", + "default": "True", + "type": "boolean" + }, + { + "name": "node_activation", + "default": "None" + }, + { + "name": "edge_activation", + "default": "None" + }, + { + "name": "aggregate", + "default": "sum" + }, + { + "name": "use_bias", + "default": "True", + "type": "boolean" + }, + { + "name": "kernel_initializer", + "default": "glorot_uniform" + }, + { + "name": "bias_initializer", + "default": "zeros" + }, + { + "name": "kernel_regularizer", + "default": "None" + }, + { + "name": "bias_regularizer", + "default": "None" + }, + { + "name": "activity_regularizer", + "default": "None" + }, + { + "name": "kernel_constraint", + "default": "None" + }, + { + "name": "bias_constraint", + "default": "None" + } + ], + "abstract": false, + "outputs": [], + "inputs": [ + { + "name": "self", + "default": null + }, + { + "name": "inputs", + "default": null + } + ], + "file": "spektral/layers/convolutional/xenet_conv.py", + "aliases": [] + }, { "name": "Input", "arguments": [ @@ -6262,8 +7501,7 @@ }, { "name": "sparse", - "default": "False", - "type": "boolean" + "default": "None" }, { "name": "tensor", @@ -6271,12 +7509,15 @@ }, { "name": "ragged", - "default": "False", - "type": "boolean" + "default": "None" + }, + { + "name": "type_spec", + "default": "None" } ], - "docstring": "`Input()` is used to instantiate a Keras tensor.\n\n A Keras tensor is a TensorFlow symbolic tensor object,\n which we augment with certain attributes that allow us to build a Keras model\n just by knowing the inputs and outputs of the model.\n\n For instance, if `a`, `b` and `c` are Keras tensors,\n it becomes possible to do:\n `model = Model(input=[a, b], output=c)`\n\n 
Arguments:\n shape: A shape tuple (integers), not including the batch size.\n For instance, `shape=(32,)` indicates that the expected input\n will be batches of 32-dimensional vectors. Elements of this tuple\n can be None; 'None' elements represent dimensions where the shape is\n not known.\n batch_size: optional static batch size (integer).\n name: An optional name string for the layer.\n Should be unique in a model (do not reuse the same name twice).\n It will be autogenerated if it isn't provided.\n dtype: The data type expected by the input, as a string\n (`float32`, `float64`, `int32`...)\n sparse: A boolean specifying whether the placeholder to be created is\n sparse. Only one of 'ragged' and 'sparse' can be True. Note that,\n if `sparse` is False, sparse tensors can still be passed into the\n input - they will be densified with a default value of 0.\n tensor: Optional existing tensor to wrap into the `Input` layer.\n If set, the layer will not create a placeholder tensor.\n ragged: A boolean specifying whether the placeholder to be created is\n ragged. Only one of 'ragged' and 'sparse' can be True. In this case,\n values of 'None' in the 'shape' argument represent ragged dimensions.\n For more information about RaggedTensors, see\n [this guide](https://www.tensorflow.org/guide/ragged_tensors).\n **kwargs: deprecated arguments support. Supports `batch_shape` and\n `batch_input_shape`.\n\n Returns:\n A `tensor`.\n\n Example:\n\n ```python\n # this is a logistic regression in Keras\n x = Input(shape=(32,))\n y = Dense(16, activation='softmax')(x)\n model = Model(x, y)\n ```\n\n Note that even if eager execution is enabled,\n `Input` produces a symbolic tensor (i.e. 
a placeholder).\n This symbolic tensor can be used with other\n TensorFlow ops, as such:\n\n ```python\n x = Input(shape=(32,))\n y = tf.square(x)\n ```\n\n Raises:\n ValueError: If both `sparse` and `ragged` are provided.\n ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are\n provided.\n ValueError: If both `shape` and `tensor` are None.\n ValueError: if any unrecognized parameters are provided.\n ", - "file": "tensorflow/python/keras/engine/input_layer.py", + "docstring": "`Input()` is used to instantiate a Keras tensor.\n\n A Keras tensor is a symbolic tensor-like object,\n which we augment with certain attributes that allow us to build a Keras model\n just by knowing the inputs and outputs of the model.\n\n For instance, if `a`, `b` and `c` are Keras tensors,\n it becomes possible to do:\n `model = Model(input=[a, b], output=c)`\n\n Args:\n shape: A shape tuple (integers), not including the batch size.\n For instance, `shape=(32,)` indicates that the expected input\n will be batches of 32-dimensional vectors. Elements of this tuple\n can be None; 'None' elements represent dimensions where the shape is\n not known.\n batch_size: optional static batch size (integer).\n name: An optional name string for the layer.\n Should be unique in a model (do not reuse the same name twice).\n It will be autogenerated if it isn't provided.\n dtype: The data type expected by the input, as a string\n (`float32`, `float64`, `int32`...)\n sparse: A boolean specifying whether the placeholder to be created is\n sparse. Only one of 'ragged' and 'sparse' can be True. Note that,\n if `sparse` is False, sparse tensors can still be passed into the\n input - they will be densified with a default value of 0.\n tensor: Optional existing tensor to wrap into the `Input` layer.\n If set, the layer will use the `tf.TypeSpec` of this tensor rather\n than creating a new placeholder tensor.\n ragged: A boolean specifying whether the placeholder to be created is\n ragged. 
Only one of 'ragged' and 'sparse' can be True. In this case,\n values of 'None' in the 'shape' argument represent ragged dimensions.\n For more information about RaggedTensors, see\n [this guide](https://www.tensorflow.org/guide/ragged_tensors).\n type_spec: A `tf.TypeSpec` object to create the input placeholder from.\n When provided, all other args except name must be None.\n **kwargs: deprecated arguments support. Supports `batch_shape` and\n `batch_input_shape`.\n\n Returns:\n A `tensor`.\n\n Example:\n\n ```python\n # this is a logistic regression in Keras\n x = Input(shape=(32,))\n y = Dense(16, activation='softmax')(x)\n model = Model(x, y)\n ```\n\n Note that even if eager execution is enabled,\n `Input` produces a symbolic tensor-like object (i.e. a placeholder).\n This symbolic tensor-like object can be used with lower-level\n TensorFlow ops that take tensors as inputs, as such:\n\n ```python\n x = Input(shape=(32,))\n y = tf.square(x) # This op will be treated like a layer\n model = Model(x, y)\n ```\n\n (This behavior does not work for higher-order TensorFlow APIs such as\n control flow and being directly watched by a `tf.GradientTape`).\n\n However, the resulting model will not track any variables that were\n used as inputs to TensorFlow ops. 
All variable usages must happen within\n Keras layers to make sure they will be tracked by the model's weights.\n\n The Keras Input can also create a placeholder from an arbitrary `tf.TypeSpec`,\n e.g:\n\n ```python\n x = Input(type_spec=tf.RaggedTensorSpec(shape=[None, None],\n dtype=tf.float32, ragged_rank=1))\n y = x.values\n model = Model(x, y)\n ```\n When passing an arbitrary `tf.TypeSpec`, it must represent the signature of an\n entire batch instead of just one example.\n\n Raises:\n ValueError: If both `sparse` and `ragged` are provided.\n ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are\n provided.\n ValueError: If `shape`, `tensor` and `type_spec` are None.\n ValueError: If arguments besides `type_spec` are non-None while `type_spec`\n is passed.\n ValueError: if any unrecognized parameters are provided.\n ", + "file": "keras/engine/input_layer.py", "aliases": [], "abstract": false } diff --git a/src/plugins/CreateKerasMeta/schemas/regularizers.json b/src/plugins/CreateKerasMeta/schemas/regularizers.json index 0bd3f84..4cf9ae5 100644 --- a/src/plugins/CreateKerasMeta/schemas/regularizers.json +++ b/src/plugins/CreateKerasMeta/schemas/regularizers.json @@ -25,7 +25,7 @@ "default": null } ], - "file": "tensorflow/python/keras/regularizers.py", + "file": "keras/regularizers.py", "aliases": [ "l1" ] @@ -60,7 +60,7 @@ "default": null } ], - "file": "tensorflow/python/keras/regularizers.py", + "file": "keras/regularizers.py", "aliases": [ "l1_l2" ] @@ -91,7 +91,7 @@ "default": null } ], - "file": "tensorflow/python/keras/regularizers.py", + "file": "keras/regularizers.py", "aliases": [ "l2" ] diff --git a/src/seeds/keras/keras.webgmex b/src/seeds/keras/keras.webgmex index fe94c689e8fe3cca600355c838a7932ed34101ad..6acf3bfe9d36ae330c732d2aadb5685c2da0750b 100644 GIT binary patch delta 191271 zcmb5XcbH{YdFJcW*4cPagIY+k0)}p>C0VLgQ#%KesZ%+gQ#p)nv18?&ONc_Y!GI7T zZP3OfA)8=if@GNyCQ4xJ83)3(=Uyj_`tdW5@io|R=b8x~!~Lzbc2y^yab5lpyVhQ3 
zg>QY~{l0gd(?58R`Ft*bheOpFs8ouk< zVVH)Nk@$>}8Xn>dQeX77OgBI#j+0uU?xvpYh5@S$95Y6=PVCr-I59lYH*wr3^wTWR zy)d*R-?fY&&FsiF4A(SuKS^F4x}NUJ-5D$}<$w@;7Fb^FizTp8%e35(Rm5gSPcIY$ zbDBn+TCC8CU7zl5VnuArGb273(9bnki*?Ig#noQrY*F(ncRl%{pZqVCCdYAX-3SB2 z@O&#ZBRk9dAdONtO?}siqFDE>*s%~x@$~zToy{k!uP<7E{n(k@(O$nFnQ3N5j+upy z9au^1IN5vz?tWNaEnmhpLAERi{^G>KC;u{i-ZH7#2YO`VTY{)WOWNES}q5gMv>5V4-JXYE+pzkfgT%X z8tGXk7U9O6T%4wym}fCp5vO9&Q?+BFu;&((}=_(;v@-O%S(A}d*}*_ zGh(4nu0>DR$V@l1V^b-Ci%xTXwj1brmKqFaxjuV~6VuxdL(j4T19OpLO9E5qX%I1^ zjXZV7bNo0m3=8`S5B|68EzIPO1r|$ z4h%2BTrA6uJe}PnwjS-9j;r%ZH!aK8ZBL2Vayh0HgAQVpHOz!zVOehExagXP1)vh9 zD;D8!T&bJ6E;c6AG>knfz(B;VtjPCRNnn_U95?e!R^)_&k`n^q*#4snx zww;JS5-YD~ z#G)g1oyKt*r+(s~DK=WjF}k`uXUh%Z2+Kj=%q+E{%wnT~jb`eet8*%`8`+K_&zb2t zgCNc_ypV^fanL#^XrX=x)J%2DHDX)3vMhC@{nQAs8DFCqb6QA^iObb^~jNJ!ia4aQEaD8OOLMPQdfz&yB))UY$dbwtn* z!NMc1gqDW`W4lts1Tq4p9&;qFZF&Sl%Sq^g)Hn;%a9n(1wy(Sual}P%u`7a2hVft? z{AF0IF13+b!la=kMI1z^cIG2a;!@-psb?Y>BMH+Kw_^l&U*380=x|O^5XPo$Nk{ML zS?u6o(Nd(~#g?}pBpipU%no%9qJV7)VdI7jlid-{Gs|F?hEKThFfwr*HeLi*X#~h3 zh%wX@S-4>o<8B!@Lf{02NLojv2bt$#0FHy>Cgzz5{#f@>O?%(9^f>mzEcFuUtx~+c z%Y^8zk3R`A-L&zCdSv;&jdBDbGS+olEF$(zT#RV*BO@VRd01bJKO!c3L<%d8&@oeP z&cg~UOrHpikuw7hGEJTRDCDEI*m#D*8nIji`s#$9j+ZC?+2X*EMr<)fVETrTTt;XM z<#E`63JbXkO|vsh)QD_1jziOqbSJer<|OjGC>0CF3loOW%#>5sbvJhRU0?V%Jv2Bu z&(5%I0;J-+d%?k$%uc96S+2u<{&lSPi$gw2m)Z$@|@iyG@Q_MtN_a;jAJCI zkfnQ(Wkf)W6g4&sDYXPx6B{9Zk;!#jW`K*ug1jvDfG-(d!E}h2<$}?7JKV>&TLDp- zaGa*5qZ4Qm9qNM?#eq`HkhAgY0T8R@9{;xL!3Fnu}@JuvDqb%JZrh9?yi@*)G10K3S6vT^HLU?j_ zU?MOSjTT@h!gU9T&PRvPQ@k?hD2(lWoVpO3A2>QPaPXTBTCO|3Z8<=y7}K?gLVAWq zaUh~^k>k zW)?tF;4hp2$A{n(QV3Ib>(+J-JxFE>^%mlz$e7`{R1YRKt&I0ALT@wY{ z$KZ7&99tL&z!IbrnVf{cb5e^zpBaaP#R>on=;bKQBt?i3xfBt^1r7jz#lQh!1!anH z=1d#04WJ1g&=RRa20-&-dYW<(0l_zplYL;Pz@!fCCK};@Y^=n@eFOpd7oQgcla+*q z__P_Ckw;PprbGH>gaZ0_L%a~aU+DxXOU5DlQG@_&WKV8}=fVluxHp12rYDjzF|KLh ze;Fh4{Lm$)XZR2>97!g2?(682@Me~wk`%{Iu}Pc`nK(fXl_O6j`iDR_!kR~_YMQBZ 
zRzNr6JgEg}6d)Jl@qpJJFed=`prZySihsIgpH*P&5pL5=V$Ppn>ybqP98w&np2PVA`rj;g0C$qxDCx}KEKNdkkB+LP&iVWpbQoK(t|q#Rd9V^P$j~N#v`o`A0z1TF5{YUEC#3xm7Dtjp-if;m0R1R> z;_Cr04~Qn>uMn~41}@GcG|~+1blAKn@_IWZm5BUopYtTIAw>|KBfJ1u&*zM>2o~;> zuK|BUTmqR_mJ&q*q%HdDK6sx1PYfh4MO}zXgt63QF=VA67ms>DD981P?WElRH5|N9 zesPqD-1GKR#7uN0rNsK>iiJn#csAs$U@yvV-Gyb6yW!LT!m|-Cn1Ireq*9y0#X?%hG<%dky@0k6QxwBrsAcO z00pG7sH2l$irA2VZ6T!`kRBB~@*`XB4jaaaV5=ZPAS})uXl0{bKppV3@Rr23O#1M| z!w-V+Ngf%2+9WdLk0>QSvV62TFhmj}tt!>gA{~i=^>b*hftDG3Nr(u zP>k!xdN5_&mFeTT1#(jXB_fmniJ6g#6hRhujz=Y=rHGY^Wf0>;Jn72G$WWQo3r%V^ zHh4+^3X)nX9GsIMBTx`jxG#^zCZ`NgMOzf65LJfC1tdW_6R=gX2+~H9M4_M#$khr+ zfx^fE$h$t)i^_q1Y&;b}6Ff(uM0!SGFQAbmKgmoF;3o=>gmE*#DW$;1K;Ji0&rhW( z;>w9QL~?3=fF+O+ctccY*tEz($f`+WRE5I?e%llgfpe5UkO0XetI~l#83jiH$H8M| zMvZ2PTmionWt6sMdPrS@C~uM>a2^599>gPd7h?z+DGiklDmG*(pjkW#I2|i9(Ks~A zPgLoR+6@Yku|V4)K2cwA1wH{9P+K6s^*}O|6{XZjAPBNp7^sC4$E;l!KoakhE1IGP zKyLtv8W_)jDTTO(%t>Ri3AEdEae5R(n9tBzl*quvf<=+kF~HF&2_^8s1SL@22`$o4 z#Dg-i^1zmh5H3el2B>ILVPRf}3{@6xJd_%%2skZ*r7BJZnN6r7M&o!g$2Q1as3BV( z`5Csalk}mmDsk~~eIVnEL4(i)^oKFX8_+@)K)yju3wMEuiv^2VC~yO%55v&+i7J^* zDuzR%oJ#PZI*fx0l!x;zp^Y|{O%_4y0epuXE9l3HiPfkZ(27d092od+(3Q=R9pVAW zcQ9V+Or&r?FHv&^4$9316s@Q_xWPUV0SttgKokZ=9cqU*90v7ClWM#47l@CLl96Fk ziW0R}%MUqphfI?g1@54k=q2?Bkwkb;j1h(*O*lr7DO> z9p^&w64_ql=-5dr>Rr^JBVr@f8yT`Q6pDgCLJ^Dtph3lrj5@(PrH<)>WbzEk zviPMWGE_+xut!xA7l^#D6+QHP@@6Nb!ayY)5X>P)(Y~#Mj?Nh)Ce~?)1QLWqX2~P=r4uT8boBoHbQ1DtN3J)Cpt2*bGIe zw!nh)4}yL21f3eV=*%YYEjTBU1mYG&4hIBYpLn5P;C1Byi4BO>2 zNnbNjwcE$P;0{P;LV$is9Z)R7#1#-R*gAOv_&1_jL2<=5e9~sh=bVp@qR9nQasYz4 z_$-HlExI87bV4|`00>LgOo1W>B#Ut!D$8I}-v(F!VJVxCc~cyvXo?2_Y*OL^l=*VS zIw6t@0o)pazyOO8j{(Ub!H~o*@qocDzEUm%Z;izZ$Vg!?WpD$_MBIo*kU{`&QMOS@ zs$ilrAsd_gXrD3xdkM%R$;x5OKuUxvz*Ko0;4D_>0}Vu0g?>|8r{IGE`q+p82WJUR zK%TRxVdKPb*HlQs=VX&)7i5Ku9SRBKH~dWDNs9bbRNZ~5#|eSmq(VT=3{jC?09SMb zPX(w<5P{B~PO=SMmg># zePUZG`;AR^ADg4RNs3FMFylXg2YCa*BM>Yb1{{L%#O}zsV=PNBB(wp8Nnt}Gpx{|3 z7dq5I#>^8AX zuzAcx#gKZjj#qw3=*XZz)e_HyVo`TC2?N4u;wV!f3Ro;dix733Jo%!N5ByB==>L2C 
z%#-au_o17b1j|D32J}z}HK~T`up>~V4%t6;h|YnCDKOw|zt{e}v-x#XuWzD)OwFF` z(-fhclMetK02Y!aQO|%bp;F_CY`-|s^}MrBer4*Pf3k^^2FMVPjQb=dV*>JGYIp=) zkCHFp7I7hUKJ@hDbI(5cUw-jF@gcGS4|0aRL9(DwLUkFQNC`6p?jfb+zeuZiFl45` zh@46j;fwev%A4?)fD4L-8Q~p2L|U0dAQ;F)Dx-Ypn<-lglGis;)I&~qmDr~4#)^Qm8{1Iu6?i zo_si+s5Vjaz>FA-8U!LT@aPmpfZQ&wh?GB|AVmlgGraZxV1^s#Uw;QNf{YHSp)Axr z=#QN7F8CDaGV%h*ABW^IfA{>eO@ayp+hM+BMEJhc!ib>;Q|eJM>oWxa0wOa(-?8Io zpZw5*Q@I0EU|gb}2Nftl7z<&)g6$K)6yQm&ig(Tb&qck7LNy_uDiL)TLLUI1${-1~ zO8}tI1f79Li3GeXu6jMEVJwA}J1jw;QC%jeLUZ5`V4|r98B`U3B~YLNMHiUQC*Qj4 zR74qvf`agn)SDTca7sKTx`XeDct8=KQW*88NCqa(Rn%g1pj+etGRWKjy`X+-VmJ?G zbwI<840z<0nJD#9m%}}BMxyEi+{6?G-{lzGQP{^BDKqt1EFlX;rm{(z3bdkDpWy#-LnF3P5BNlH%k=??0Ve&i(IRbA8D>wrvc+r>IF>V7lVTk$}Nh;b8ZRaqm zsQTL*NtCqy)DNL5Dwz_JMPhzCG};xbz}Ale{Es4rBz7r2fFA z8BsXJdg1sHlBn=g6)j@|QHKo^)CdjaZ4@{|3VSdIki(F_;u43aBuwo}Bod-ZCx|h^ zkOz%S#!DfUs=dtEAqIU3Y|vzwLsvu*>i2?H0PErkkU>b2LPZ&UWPM`6f{>;H36)AL zqRdUbm2x(v4631|FW3fX8u^R9NCpr`&df|uS>8S)V`F3(qWr;)k_c8YqJASf6ZXl{ z3AAoR>61b)Pz3h?VM}2J=)ehzWJG}=+y%=dDR7_>65;^a3SNan;EoRUMQXlumVHGQ zFZ2pGxKBk;R4B+Fu_iEyE#i%Uv}k}_1a~QT3(rqU6FYDwWA|wx`77-vB8j}Jall74rl9rJeD>#q-2$RDIsZ|I7CM&#D=V7xvQJDsq z_*_F0i=d3^Lit-btcb{fi{st}g#<4C2r{3mLtG)jNlDx1=8K1-l9|M?E3mjCqjtHK zB(gQwBMZkEz+j4jIWEo-{#cN~K|)v{rvbikH-#d&VB#ruQBK~MtAO6391CFuAjrhw zajk$vnRkbIXjv`-o&><;kfs2}sn3A{$)!1Rw(H^t1epL#6MYG$#C(pO z07slcZ%Bn9X3<-6Qg$g2EG}B~5E)U^%;o5<4VqnM#U@0jEuqTtZ4g!&x5V?p)Y#Np- zwnJO`iG*B&?0}3HCX3Pv)B{k6s&|Z*a9DZGsd0L zoADgTJt}4_hghj<&LB8KI7%Y|gO%=srAVdGIdRR7vL3;Qph8F%l8}i32`Ko1OgP|1 z8*ztQM0jyr3Gx#uGX7Tdr9MVFf)9zXOw=tRkF_XTQW~dxj&MbgC$kiN0ha_yYM{nG zsS(H@bOm`Ju0wEL1N@EC$1%mSv$(ZFYDry81SGBxxKt8Fxf&bfvH<{u@+#O~3`~pz z*N_>2l{pwvdI1@raDWU@4BXkG!a|N9`+`F3eP|%gHPd5mGa?kMTbKsib|mhnpe?aX zR0J>Jko__W2?;)&dN-M-xcf(;20P_y2tG^d4@!>9hQs4JLB=|XLF831r{F#6AC$B4 zDtLUch<##N3Z{d{COCmM#DzJI4FOSR1JMKBBlJiPOmP5vfU%)^36YB9C0PN|l5W8Z z0Jy-m;!2_19hEe0osgcV+*Zgu;6A4)fX8loTR_>A*a2 z91z@yinHfJ5S6HXQY#2)(V3zzmkGrh;O4j*33o$a^l>^qP7M2?8h~}l-NBQS`;frl 
z(IH6SCtz)e06tqKqo5XvV#3adaRs#?(gZvRJaD&(Lm?(NkWv$8x2Gb zWTfm1J48EvjLIriSdvM?24zkvd{B8}VCIY=^@L8Uo}RYB4bE(HP#9|a>#b`PGR@TPcaC>%lK3wn%7E37OG6w<&W!;_Fwi{c-PQFs2hCKo_P zarK1i5_gQaQ6xMEL5_uB$fWIHvq+w^1BHg~13bgaaXSy_gxU*=k!`R52i(R{3aRa% z#;c0$bD4zmEv1qJ08k`lN_J|@hQDykG4RF1ekf-i%-!5>m70JTsPAbrAk@uNhD zIFKHb5R1sc80=0Y$53S4N5MMqu_XG2#f=HBQ7HzEREiith=H!7{LcM9Ag>v6#f8fN zAOdcaQ|(a>2;{|Qxfh41(Ev+CO-ON06s(PM6FA6Q6p78zrY7k+JSC|ZK9kB0oB*_- zyjdQ&&mrv~YY6i%t&H4xd7~+ZdCH6kALhr&1 zR@>_v%immxF6qkD@?ie0;%A@Jyg5>9%&g{bdz5y^>M&;Z^1uI0p4-Mo7smSY$9|7l zTf@=ux#j%3zeRgb&!oNJ=N+H8^TuX-V0L)APkdouFLWU0fK;KlT-x&3fyuKc8nb@2t!QvxE67K04qTNi~ZJ1 zXJ}W`H}>M8rO``YdhK%hG+jzZ&HURRVXq6DBkkGDr5&A`+(i56U`KuP(wA+U;^pG==CqBc8!P$v?<3{yFqPx;^=y>Gn@bY zc9WKk%zD!k`Hw!!2K{Nv8L#E%#4%g$dVPH*fA(RXm!^6)T{HjTQ#?;Cc1=xY`Csgu zxgqXt-D=4$U4#+KqtZJwe<6SFZj;tNv(wknn*Z?AV${i=nep~~j%IVIEn1n*-_7Gd zxB?Q)|MhoSWzApioSe)5h346ga7WkkGka&Wo@8ixYAOHWXXvrfIoUq4mp?%B+}=*G zP|Lrt*Q5n)n|3ym|09n{O`jbx@)zIJq;+=sU3D`b;L&Y0#%iYAwQ`H9z@(c+$!u7%yag~z3?V6?uRKlVjtc2-8+X!N0{*+{21 zwb)_jqvCmR(@SQu{IRF$vM|!`>SGT*L-XAFXy~lxqw=|>w>CYKKPH~Hdq(HxYWa_z zq5ImP-eykb|9o$gHs9VhG`O2T_9eP3?hbB_@8&=H5?xk$gRbGF{0*;d()R4}w#Iz^ z*q7-Nb&hV1tmZ%ZGF|MEc#N~Ue!oeJhZ?&>+j%FC;nc*`^l%>hIz75~$D?Q>f8|Gc z4u)oTmV<}B!sE=|-f$4-&wbq)E!^sC3)B2%Ja(roq28M>h{x{E#^QATJ|2Uy#(){+ z-~R-ki+cyMh%M0EwUtdSx8(YLXSB7U!0sK(!~4$MU=Q_nPxfD`8oOOmvnMvUlD+uS zmpdKOK9atqJ>Aa#Z@JQ*z1g1G{9D3Q_3_boWj43p!}L8?BkP;V-~1lh{Uv`0H}x&i z?k$GK@NEA2Upk{T!eGK)$?t#foi{Y^BwKq#u}j=Rw=|Zi)|p|`&VxsrZfx#xCeudC zrTr|muJ*RY_IMzF_PxxRjdVnd3wh@+pV7u!cXlU6^84S%^j1AuY_#WZ`*peu&y9@@ z%;ptggk8br!rESb?tM)+H2dymqx-V84CLXbZhTJj^ki=b`+C)5d}wI5zkhlpU-%U? 
zqt&Y~r1^V!>|1SgPuKE4{*^OYi|LIInEAi*xUwACEkXX0*Pqck+uD|D{rTNIP6Sg6 z%VGX@9?hOLIKTWE9%qByRX@nz_etiM>0Rg;-pWtEp-CIrnVIzCyz6nA>l>2;?PK}- z#q)IAK#SFuf9egG%A!5nW9B_iunnt!U_4m3R8&rW^J{Lp!Pwp$NYYE+wRGoK-*;2B zwXfde=hy%08Le$}d8KVI{~u2_Y0b6R99hfX`Wt7^&TM@?u=DSIoXJ}UrY6Ui^S^!< z&&hbtW>U{z@tZtbTZ6N$>-j6*$fA^CtdSjE;(ziamS<6501U=l9Xv`bQzebOq zm3X#e_Mtan)!WIIYv%@!&YC{jK9PUtF^;`Ho(-=rU%KA@389YFv6*^XS1vwnwpfGR zL;2<=@`ilrTFAdl>wL@D&{~+k^n;xF94UYu<@MiTPqUtu`|Yg=8bm%ZG}#FS_ox5rxZH~tzT@0#8i?h5m_ zeGv8N8coKmy-P>z6Vu^E2mMB1=1;x3>H6)J$;NuerTtjeYi`nN3wsk|QT}&tKBG+y zZ4aAf{=oYY@=WJShq;k=-G4?~AGOnVJ^z>Ap}BY4GA7#czY#03tlf>3TK<=G8SRet z%(eXOAEJ3anau`+{N-<9z>)S~bt7+lij%SioKf-cy?_hfB##~+_16j zIJ+B{_FEg5{Z`H49cwQC8e=as((#SHeDFUo)^I(IL{n~f;H zTRcx&=42M)uMA>*b*sX2a?0TO7|n`_LI}AQ_oivhwfnxY{*N zvXZ|jXOQ{vY@jP2;IY-{otPQQ_wzH?&&92JZRye@+R0lUxao%eDYtj!GOuK`r&bE0>!Z3oa?*W4-UMzdh%y4Ex8)vYdYRb5NRYdzjV{?VtJ z81(k4J2l@j1Nahu-KLGKb(K~B(>!55kZr6BUKT>#e(UkwfuiaMdGq$FC z@^q^yfBapyYYTHDed}|?hdbq;(T3kXY2Y;*T5DI_XRhL?RL7R?B$(R3TpQZ%YIg$g z_!}PQ)_1!1d~~-dzx%znYtF#ZMxzIOT-QeD7RPGc#NWC$XzsP7DdD%FO}Ev@+6|&F z8=GD17#t$_Hnh#vt_@=>ci!93Iw!i5X!Za|YhGv_y=}v$pA^YXlzO@=?|IYk*kKTTL zRBIa=kzY-|Fw+pQ;70LYTi@=UomvMPoXMa6=xsM-(a^|bbg6QI4j%oDS89!+mN~nd zjHj-x@9O&S5D4tf{P`ceU7LE)6qV%O>|eCI@|gtmiLI| zP4Y7%#!kofWd63_XlUX5;Oy!M@lAECZv+EwK)9)EDmZvOxZQQIDq?WDi*G(-1eO2vE{~0&jP6p2i@7|*9UfuHQiCtZ*%{N9vfi4@G zAJ*F<3+$*mO*=bNt7D+KhPK*S^C#8-Hw`Tw@15wMBM_@jJp&6J=)!k+)3V;(n78x% zO?S#Uw!)EkZwdF?lrMbjc5QHOU~_PWoUyL;P1lm)1mvpTY`FE^l{v7E`g3G9-Z7WK zJ58j!w`;AF?$-PYp}Q$R_1m{={bRM2WSn%fuC;ayZcS~2Z`GS|KWU$v2II2Z!8N_N z3y7=UtjE(MGi$&m^nY%0V7fj3ub-@I11;{t?lh>Wp+!df)W$HNsiBQET6AXv%%na# z&>xSiE`ysITCl#cGMoZY8rpjAYUHhfP*kV->bN^ldkBeq?RRe1YQu}}-UgZGo$}9# z!DK29!FTFyy?<|VZCd%2k&{%Fts|D1g-ecGx`2!Z_{>WmIs&DaCvpjn%LT% zog|U0Ysu6MxBti^oATFw_VydPhuqrsW!WydY8R&_6NiBs_*xMU#A+uC(hI;)L(uajVkH$PkL?MQ+?a?CsPdp~!(7Iq|^3jr>>uJ!K?4R>^ra;i7G zUA=SlHquV@=iHXRI<}Jk@N;!-Bp{MIw+equgSW2pj)y(b;fFvJ#8r^8?T$)~a=$X1UvsdrhSS3%z zkj<`9r;jwXu5C>Bv<#DiK7;Ob)Mn%l~xy2r2cHo 
zTO(^NAR|1~a5`e>03-D#>)Q;5hXF_G&&|$$7c+je{Bvt!bTZiiBw^t8_`(#X$>Zi` zI%U*=NUW_pnMh_pNO-~d7Oqi)a)`TAGaYRX7)B0y{m{V9;MipqEdgF`6g#?MaV2T2 zT~^crzz7R@+n<0QmcsejU1~j?SZA9%vjXyK%HRIg+qF(()@f}Al{GYTJs3%P0A-x; z?$nmu0VY$O>O1wI9)rs`&&AO#oc@WgHngRlM%&snNKAEV;U>ZATK+lSY%h2lGyR}3 z^(GkXUkZF+Sko)-=DG#d<~TgdowDF8<@o3<-+VXE!fru&L8@}!qPb@@9UmILG@erE zyZVbSyz>a3-`d%YhWaTyFd=tD;3vH7thkbUmftB!&cZ!pXW^-)5;bPc4-M;1fdVryS;*-?n3`lchw zaV}{aye$0{iT4rZ$fb?gzs!b6&X2r@iDYALu5qapM5cbkJ@{Q4b^Wp>h!pe3~MlGqDh#`P=+J?E?-E6<8= zo1f)goqLvxSum#JN_OZq*O%v7`{ytHyr@Op_{zJXxA?i4)Wr*B;aTxThO^v4$4Btp zLT>MKdqoeMM|P)2`Yt;MQO*B@n_hmD0oI3yCTf=%1SPNUzvrfxbD@B%eZg723=y2= z1{9OSSK%hOY#{FL@kO`h-pJhQyUZY<3>cv4Cmz7QW^Y1-;(<`_x z!go)7{VaS{be8WB7-!*+r@;JIfVZZw~zE~lS`)7H;&gOJ}|;+H_U`q$jP<3(MQm#U7SQEqIx`$!{9 zZFNmMm*Lj6@mub_w>W1VyY9({zV;74mAC!bjV(tBijbVkYl$GeOzW;&HfA>#s94-m z3pe7KyO;96{L?!+j}YiC7{oq5NNc!}PA{+R-kmPR%SrxofA->@qxkv@26N62*1Z@` zr+2Tcr7LU8ak{=PhHE*Bv0pHpdw#gK+4Xpha@V*1^p;m9M=<#dM)A&%(#21WiG_3> z#pW*#c#STug&g-8?(zx`8|KG|0K7#@=yHvZ2rf8<>#OKrkhXy_m#1J+n8UKb?dcs zTA172sP*00$X&PCwYk|9S+l$Bl_IG{_sd>M&N~0be?6T~{l$r@WUY(vmdfetxLk18 zExWh8l#A8(is2=TT^P@=+?xNFYdYE({_?7-q_&Hz zZ?2sFX>QTUp8RNw>{&A1&wSI+N}^jlmOM9)RY`Np<>X)g>#M4g=`IHUUF9@?o~SCW zTYN-O-MM%iP|UV?RLr()RXnzQELm&Op(L$&tV+^au2RgTmAtj+RPxs1v81gEe@jR<1grnOWum&3x|LW>wP6`3L^iI$)W_>Lkl7 z8iCdE7pVOUoP;#n;MVVuxP@>xiuBq?jTcr-t>mAPgjUke=gyf2^z)%C zN(%ak@1D+2{I9FDl7+tSA5Q15{@1Ijl8KfkD)v#5(PAGZ7hUwDr{C8GW@y%jtNjEDQicVEYISZe1xQdDIac9(#le1iba->q*;PA7IT%Pv6xG8#=equNg9haOU_ui zAZc19Z!9)h(#B%>C2f4)Kd^O48!Hi(w6V~8MH}Zr?@QkJiSJrfNgJ1&614GoLzP5v zZ_bES$rKk)G~t#?rdTY!WQxVnmPD}-lj4bmeODzbtfVA$Q4ztp*z1nueRJ^v#qz2p zbPHBj_`gzn6p@>YDHLsst5&W~61A$QREUzSJ&?QNYn2QXJ*%D-Kbs4Alyt0kEa_Mw z8pXz{)kr#4IW>98B_%5sT{5zKxGEXh;@nTM=4XY|Em_$^r&3b0Vh@U$l`7Dwcv#V@ zcv#u0SXZS2CG9E>w`5%phOA2JRmOuxGc%U-Yw4%7l7SV0|5f;gl7+3DyEQ+BqbZ44 z@pj_BUsaV{tTZg)VziQu6{jcZShX+dBqS%RPE-15$;(PdBZ5Iya$ z0d-r-yDI5hwR#zZ6^DxsEBPzhmOqvJcLh^>DF3QtYn71SezJ1)lmFv~cWCb%sGPX< 
zD9P44TPiO)eT2H-JMXK!_|~K3N@BoUkCFw+jz>v!OU6JOMYD{*H^0;h~(H+WYjc(S4p%BReq&bdHw;@e)F@Z zuO$Zr$n!-g3C|VpUOf7o%B{u6MNh@P7td%F^MHPT@Sjf?Kl7Z*i7N#EF4gI$`B`I? z50t}{T>QCnw-ygPr}F%&B;*gl;!i!FUn!8&DQ3R-zim;mzpc3Axs_`UsCfaGOKM&W zJ*cR8F?31IGjvr_^UAKI|B?K>G8KtN6-i%u9mUe)b*>OBeeo>(aQ^iFJfW2={ek1z z5>&nPVbWAfx?Xs7N!NpyEiG1b{ejD=O1@rU2_HBtB(M75twsM;m1__9 zdL&tue7*9-C0EZUtCFjiF++ND$=9=Uiz`o(suu^PXnG~WlBZw&C#$L`dg1saMX&b$ zwL8#Z#nY?MT9Wiac#@<)9}=sQsh6t%)T>`mX|HG{Q!me5G4(h9onCzX4AS`gM{X@T zuVqIiXD|AcoW0PElCwYeZT;cvD%}U1z35bO_M%hC*^6yUzWzK0sY=pbEluKvlDQYV zSFF9-eM#GkuvJp_m2*b%_=_u77me#HepM3p%595`Sd#a}eIgANQM4rQ#r%@ESLhA7 zKVRwpBY%2Z@tzwhVR7S)m6!h1UAL5U{-F#`K5=XD#v3cwRwc1d1mgGvuP-!JOmKyu z_4Ab<+^SG)$?OXwk*xj&+bgMkwpW$ZzEl7i>q>fG?69Qw#Q{rdUoBT2u;TaS0aqo# zuf~>PSuy+oO1-$NiIe;4leZQh{uxfLB>TmoltjNcl#=8ZLXiZ&l%7gbN`_wqXvy%O z573h3S97xJtI;gM@{2{6EPr_{-Bn5TtBgog4obRTOk7g^N@cDtK6#S!lbnC?s^^kQ zDYBoARY~(JOiA!P2}^%#@ytK}yX&qHe7{_Tw9Jz87oU)vzdUz!5{mQ}z`iQ!eKnX= zG|BJRCE`^Czf`oUWcM$~mr8nHoQ0(K5rwHKW?!9xr1ilbB{nLleSwV@6|Y}9s^=9; zFW^vK`?*_-Z=L2)N}69BN=fqz*_0GNhoY7AzF51Y_eE&9wK!!Ex)i$y9L4d=;y_9Ai{(g? 
zU#W`}rDFPpD65j{FTO`z;DG8Ex=~X72=RdGKgbId-LD*0$@hx{pyd076icrEz}+7Z z{s#!NB>k1Qf1XhM?y6+|C>=oe z!HobRo$^M2kg&WFaDjy7m4NfU?ciR3Fe7;{;DX?&E(R!-leYq-iHWSH8jG6&5(hl| zlFHRuc{AWZVCrUo5?FaHKnSe779a#xUJE!7SXJH)C{IgdC*=i!15hn52}u2Yy18-{ z&QDzvIK;>5RzQi5RfH^_ZLatSw*iDq%G&@!W6GNVLV?Sh0BFp?O#qQ;mbUVVI)r(J&P=kA&sbAd0RpTAoW)QS$B#VqGElb9G|UawQvn7_&}P zCApoSdX^H7B&nqnQ*5(}mWnr)ZaI_;Fvn+UipxFB*d;Nm{DA~pif1JiFGYHkJfj$V z=-72tNw1#Y{ZaCc;v0XUU3Z1xRr6DSX9&^1;PerA(4sQ%lh;)xnJQ0E(xOr_gyHf8 zkCLS*;VVv54kX!4VT0$#ISNTB_kV?;I?qQs#ao^i$5E0Txt^*dETxl>eoL{B=Lb4c z5>$EQxz|-C=_oZ*v4>)e%Du(Szj*AL;z6_W-|pZyT8{K@+dTESRr%Y>{{N4E-iB|U z@YBqEOYsQ*#Etk;t10^b8~@4;|GLBZf98hor12v)NB^g89MS*Z{^xEE|Fs)e^uPFD zyy^Mh{*yP{m=fRNKiWTgQ(Wm)=6?2w;xFzSJa+w4PkNQ#zM|N9d8O~EKYw{;=BlSo z4OCus#j&LAsb3hXOuXQj*Z$N~N#)bWj~Ok+gIVR8V>>t~{(s++7Y?FuT~NHu^;;mT zn!AzrJ-1xk#t(~M&^9Rh@sq0;v~|k1HpxOCd{(tNk{!Re&1;vRy``L3b!?Yk*0{Li zMDfJSE7$J-m^Vq=<xx?%0UDZhP+{RNn`82yfJb~Ya90+E*rG}qQgd!#r9h+QX7<3G+G_ac#-o1D9xi)ewFWF zF4a0kv%F(@aWkzKElkZu>qRR&Y@NGsx`Wn-Co9+fJRgxakq(Ae>PV}8;YXr_b^jQB zr|(4vzj4raxwyKka!t*=SOL_wx=kF&Wg?*UBKt4<7K1-ux$X*Hju%fvm1`}UH(#WaO7cc@u|&$&xcMUMRC`|=zu49fTjfqSSeM%v4oFLJhOBwB|q9O1C9b$RP< zbg|&G=O}4L>S%fWV?H#s+I&&yC^xYB<2*NgFS0eYm|^$FEu7Z%igE)FMO}&U7c~aaq!vQ!MSfqkw))K~=9M8& zP1Nd3y}yFDUBxpitY_`wy{T?(Yt4pyeRi$+B0s0b?w`0s;!@!K6UB*%%C$G}_9B0K z@R{QKk$Ow)Kkv8>-+m8Q(#B_hjJDHyQLs?PN$W)+K$&4ncX9K4<=UHCx|=UDXf@AH zM{}*H)NlYDg_!V$-L8v0kJ|rkmt5=>yv>SLLRhhOZ{R#S~;n$ zOt$VU4;DACSFSCd6%AW`jTzH9Y&c&0!^=qT`Ymf`JCm))jJOEDTY2#n{o(3j@ywrB zt`jZiURHUrHqklQ6Hlnm-oIA4viQZd$~Dz-e!{1>`2MxWt}AFfSv3ZFhc=6EX#D-Z zM5nLONi!N&=iHF`D6J>8(Y>~9uU#}))xAz*PClMgtKnBq`eLVe;*IR$TkDnQ7BBw| z=GhWILn#+j{LMNe4B9P&wGK68W217snEx)d#-Z))&5gsxQ`**OJiM|{-2D5Db^k`? 
zMSKy(ZCRZ=Yqg5N@))D)%F@pA%uR@7Omgi6e6%TyBAsxTUpv#kgl|=H7PK z_Do?s%@j1A)Ee8#Z1+g<#8w6Fak77G0{ln2l2oq~*Nk$ArEP||XP23x(Mi33tX$PL zW5fg7d&A{|z9qVSXO(X4epDatQ{8Aisd>GVX-8|(BDCkm9S+v9b2cBCW?kqqI-L3e_CqZ3-@UKNA`IIzYNRX@2&iFLE9;9t+7(~ z){B-sRBE$(Z*EB))LX?nYjtAA-YUNTcINp{dl<^Z$k0kpQTeCJbr0Xew0uLz9_>_H z2<~CE?uNI~RT>TrR~E0ihdm|r_N=cs_v?)E7$2(+uWnB@l)});%%QWtNKk7SZeO*H*5*qGhC~u69e?NiA&k2YYJ86R%|-8)GefUZ;3Ir4F(- zzqdB6MqJrv0I7*f^`V~L^4w@WSq2*w;*d0TArN+^4ytw;y$a%A`wXe0Sc!C~kw)K&r`PEj@U}>{nt1;E12L9peDz|9E zelWVwRXq9xM)^{xV{ImK28Z?zdt6!Edmn=?cGXuGR#fY6-N)SV+}xsB&P~Jd;(Mao z`q*SNFa6AQ#mQe{w6LXTDUg;Z8{%y`|aFP@3I*V`6-6 zQ%(7G(JdNp>FOx8mxi)iw$&IKj;aeYRQ$qN!(EGNb4E}muD z8f$MU9)DxSsTz~~OsqVdW5jFmRvVaY(+89ntn3zp$Bvz>+QWTgtMZ^LJB9Hsmft@% zzcw@?{Vpg##@_zUt<{lTbre5(BS*0|-ri=ELZ_iTiuHwo(aclbK6gJdzV}U)>$PEL zU_AkB3f=$2n=1VF&*18OHX#E^b?q_>T zPCBquMs5~K;F(^E(%s_R{S{B^*zKQm$a%zC&9_wefseWE&b>~xw9mYSjh=awrMMfz z{ZqjsS7aqMl#e8||yf?d_n_BMKSw$JR21!`?4#6T~6 zsPf{fn|3dfWC{hBjZ)~@TvyOnsy(fDYQ}m`Ft)9hIsA5{J~1*n-&U^s9dAeKzQwzY;9Gmtv!f1C*$E+dsZ#(4<9^``tXq6Hc*}gt!K2p z{w{xQLapwqhge4&KknN)df0Hh_{E2i`rvS5wYS_E-8hQ}52T*8?u||Cs)e7-52W7N z5f7zZhpi{I`Q@!}YfdcoRB`1!M977uiP`1i8}H=r_vgsFvDG?OTU9&wTF%nOV}E^D zx{Yg#Yu~}r7TT7#+P2h|#@|7YF~2dtFrwD_t#>f>sdrSW#fb+hH$cOW>{#jHtFzj` zqCMaYA2u8>t`wS_j;uF!Hq|me_ReyIhu>AXzUppu*EoIQZ+L+tSsAfLLa|=Ey4l}4 z6Bajb^X}xsgr=TGeRHT3{=a`1MHsYeqg$a`=_3!LD%~wyU+Fk(AWS~Ym?NDvV{1>W z>E+tsV8^^GJs!{yv#jdg@Xl;;-_^kIj~DDd=^XBvo>5EvUV(yyyB*nR?Xcm>;^ptc z1MbakG!|!zbN`FQoq8DTE%SrUu5>(808$hr&Yj~-hnIX4q$Uhiw!I69F05?L45Wu` z-^iIbYo8pe0|ap`qMz~A%3WG_nDj4l!>4S!`BBz0H@maiwtCoB`8J;&>2?M)TS{Gg zw(>PTJG@~{))vmUJ-}yYv!0O!W&Cn2@2K2Wji+W7d(L;eA2BZ4oqaola?7IIqaS0* zE1PTnfRc-7`+iZmT}x)hC+2z&`$@;$JvF>%D=U{%kPuauMkXdnT*N{C{=2zU@x61E z7if+i%*HK40|7(?(Xl~HE)Z2a7aJ=~9 zui@uv1Ht-M{espjzg;<1U71)76S=qIdGA5(e&Icw)U&^h;x;xWrf^2mUC38&dpD!( zhAS(Jy%)4fC$iK(-a9#V*xID=WBo&|r8@s*IquEc(qiW>S-EuRvXzBZg_3KAF*Vvb zb@+yyY=HYNW%_l^dx`nm-L;Wr1peN=v+bWFb9=lF_RO*erIk9R{DnOU0=AIMwGiMIBl#Wz%lWNnt(KnSS 
znaMWPraD(!-5x4V6y&&cJBLrGHtLJZV~0%@p*6K=rL~2mn1Cj_Nv-K=-HE0Q*~X;O zp2p(QWL~)}QB@Xkd~mO~t!+U@V>y9HqW9Xm2D-+I)%TXDwOY5b+3jNGy%n2#+CAA; znYev{4qE5r_9C%HW|B0NLB40Ut8aW%3~^mC|32cxE8YhZ>>e5zqH-WWLlBxb;tupdX)nA4fK~IJ_~tu9WfF_aUqP@ud}aOo;EgV(XF0RfYa2Y7|d}YpL`= z*A>(6#}Y@|wmQ1Dm9Bi}{ml8gM-F`D(B5{-LJ78bb(c1~#}Ct&&Qtb%#UmI?*Ra2V z>-ewCv-t=%JGg0@z0$}Oe6LQ-cgMpj!T-lc*nAH^Gd@}-53l|Jo0lm?TSx2crlmwe z>nW{&u63!qMC|YWK;^WyvozngJ+HR-e|-SUYM+^1UTsr0bn;O)GwIA_J@#P($^EY* zpQS%yrkR=XW1vQVD$O^Iem>yN9i3wfSLld3fNkL5^^& z*}e(OV=PcYgxVU?-HjUQ&#o$0pO$?^ng03v*tT+?1KmWwC8iBSIa zqhhjiC~MX>)-pM%PV&S@MC-d)LeEtHX0&(MdPbWu*E%=biIdKn;xen*vn`ggdplG_y%a6%C*JO(xr2?-}XG zbIKkfJt8AV1ebv(DdY%+T3R!W3<;j#wy0F^{{RHu=R|#JlwNdVws13 zi&I$DxgJicffM9!g^{bh(<|vnaqiR9I7AzfgZj$oV(Ug}n_m~7Tvv?rJ5OMv{y$JqKny0c&|C|S@@>ihc4?#|TE1+6ksbynxPrj%NQ z9~bUv!QgApN=4uEaaI->Ju_ihBBbGX@$AO|*T&qyB=A>i^7B7|dbJxvGhM1Sz~3xa zfWb9m*DA{;pAn4@d;(!FjCG98pKm=?9Uh+AnNc3{u1{hVKcZK)Wq1VLkrh)KLB*qe zZRvb5_(_TgcB{YH5{PG29z4~*}yL~?Da7&Y-;dt?)$3;f4Y4>d^zhCq| z&Z7HwCarN*6XoyY#oI-%w&lU~p|ZsE1$t={_U`UVIXVqEynXhT)CL=SQbQ`;h4DW* zoM>S;s2#M(?9d-t?~Xkc5yqaZoKQ%Ff7xdZlLn!kD!%h19`0Ko#;9Va&EAs<*3WS< zxqH>@D^`E|pvMV;A=A1M8J+K7F+G4 zc-_^^UR9rx$hl``du(q`Dao@>qD#}e8}?>; z*ihJ z*#1z~H$V|v=9j$UXY~dbqLG2(!B622Qg3&wbwxav-eX~Fw0nA7HN5;ka7MGW%xUc_ z!v8>T)99&9kBR5fdrZVF%l)QmIQ|qCvlH(uEGd{!{K->l9XD&88}mKur5(vu877u( zehHvWZI#xOS}oh&+9}gyc2x$7A#-<;RDiOqs4oJ{e3_sQEYwHrw_XB z)t0;TE@eCWpP}`OpB9I?=XtGVF7eXerS+=MRJ7vCH*oOo%;G#75?-X>)t%MOcwvts z+UFSd-rof*tar2znKk7sXpkdTi^X3h;Ym8z7UqfvUqHU`gHKmpP&`hfw$;Dnlj2I~ z*zpb^kp@TC7r5nO{})Z$})aebXeF~=^B9`5%v1de1`4)@H3domNPxDTE>w5 z&x`#L9;%~*)6>(%2cDqo>}NU2sm|e~(WjRGiO&+}`{QuPiVho&7ytTMF~vmBj!{D7 zr$5JFCq91wWu`NId0AKcldojVTO7~2+soXK)+Vjix!hPegS~zpmuFZrPJfFiP+wO(?`ifXgPiYzxqFmC{^0X$V`nmqSLW2JU;72t z_q8vuJ_*Zu`?KvyF1d*V;?ODYi5_ETu0E=i>a}7M(jvnp zeww>PuC$)42HQK0(&K&aX%;4XwT`UMQiYSwoYqsCmw9{bWmT)`8DuuLJr z#qu+iYk!h|T%h=xcI^7%ee|iedBY>6$L86JMfJZEmy+{3;BB zzf0RmMG`-$=Kq{%ecP9a6SLOpo>k&ZT2E<OCJ7mWjoW&Q10bX@;{qxzVyF 
z^+&YHMAYr*%*GZE+hiiTQ5za@l!!z>>Hh~d`}!uz64Xn+Tpmi*S!oRxi*syAW$`^- zU3;^nNcSU@h2Nvs`j_!h^^KOnkxc2-Z+#iPnC)Nc^ULh=pT3M4IrFQ#RtfRU-{X|@ ztbH(J>T;lynlm-pJ2_N5_2bxLia(pFskCE{F=&cy99)E{eGE6!a-t+dtH7*_gnjFeQ?Tf?s9Zg49nEt;e_ zwuh#>Sf9|dV=&XQ+wx-H+Hg_%3f*Xu-D+!Hd%B~#txn6>dPU3L-nN>Omcg%}NrPjJ z@iK=ScGXyRW7Q%6LwO#fNscwTG_X)p!2H0qW){)gf0h(j9sP(Tk^t~n3YY%!?l$C>#jTy=lVosVwx9ZBE+0v(U{!Oe@8{Hir znJ@7J4abZBkGn5{lB+87KV4PtRd**L3E3bdq|!jT6SCB+?Nx;>)%(8h38Y`ud#}5? zYwKEi6KGHwM-YV>67ok2weMwXx;AYh^(4tr+M0R{8_efPbpu1*-} zIsgAT|8vYaaJufh_r1G(`}f`Nd-x}yoo($6o__4~eEPk-en1W5>KAOTjv*?rPlX{b`;r&t&K7A~QzAJ3oco+}@{-COQq8cg6i!)x{5hnCY^Fl_cQ4SSme_W7_Z(c2E0wR zEdvI*=dGNH$7-gc6KbIqXCe=EL`+Q#>s-#ZJcu)iWoM(^4Te*u{Xu-aKGl#4xsBC- z{6P|jnr6o0%?4IJN)3s&>YDC!p<$V!-OT#irz0lEMz>{b?q`@a*JNv^+Pb8eu_HJA zJKj+5YHon*k(-9lVA}NO!qb6KlN<6@&Md<52~S&bU%Yp#keqdeYuWW*#YUX}5MFQ( z8pUS4smmW`H~$Ry&i5dctk7*1;!}8BuV>wfp9(9M!eW5n?>?1HOJ6BpD z$F?Swq-i=oPUE|^(V%Kt&Cy^g_wnxVM7Ke*8E>T>5K@U@!Wt5Ncq3kWra0TylA6$2 z$)gZyy9NgaGZFgRVXQkIf>WvAXdt0Xj3s&o27LyMV}&NBxscjKHptHhPduE~Mk_-2;Y&CP_2n!!G(cDF1C>TC3_%yiqZY)T|(Fy(~qXtK{# zIxS-}1~Je0{Qb>_nTgsso!0w$y0FJwp84Dp7#({8VF{V0CXlTklX>R}EH2%FIyrs3 zQ%Q;izl)9TiZ|9lF@pEqlEQ|QSrD19rdv!&XvHt_sv~%nFzcxsH$CuZbcjlyFBSv! 
zC&qjCFL6XYzl6oPIqjNAgZr)$*RuIvVpa9sHK|0p&~1y@o(*)v8O0~M<4KHr_m?;a zcS~xhBQJeDiE-g@qc3G1*NsnN5{W0#tr?PS(lkP%)gd^$o5#$Ef|XC97e{iW2dQa` zlk*)C*HhU4@N_Byb3X;LG|}&|^q{00IvRe%O!5@Y!__`G0Xq0mboxGZl83c9)vy8L zw|N}l9(~MjO8WPg+#}M03!f zC+8l;@+RWhM#(q{>am<9j{tgO+04-@8Upflw|4^)bY_i1|_v@p0(Wsj3iDwE8 z%h?^j0g1Wi-ICLV+Md~{NsyG^Kva@~vlFw16NBCI8_+Os#Fec#?JYZhi*Ir>Q#I-+ zLR{sf7_Ga+>HcKK^j0w5jeml>B|SA#WWsHr+S~L7PGq|$yBfPp%?hm$cqhBF?Z!@i z@hEy-_&nBD-8R(WG^DH>UPR-}^Cf49?HN6mHP`p8=dr#zy{4xfhHY}WqG36^`~_Oy z$H6;$dSY4&94~KRbDS?gN<}@T*-jWy%!XyucZ;A;k4`li9Q98x;01h6K(Sq8qDwTMW&SD*4i32`8&`hSF}}XgEReiroe)%ul@O$8LYw~_&fXhl3bKy z*Zv2-#?>hAbTY1*dj0f^q()5-wYT;e#PT>TzVr7cv9x(RCI?w?1*{c#0M~J-dpOl( z%17=9P|wWluxZCe0|MAy1~IB{tj9!1f~HABO;XdXQQZjB`-fNY z$&pvUDjEkoLBw-#V0z*e#3gb#YM!e1Hk#rLTF=GI+WLD;kJK5jf?st`G`7u|=8uD~ z0;b&A@dog9(%?S(Dn2*SHP_gr8A$)wtMCQI>e>b%_>(TY;WfMx#lkYZ<|_}pRCd$2}Wtn?)S#7cr^FUdProPR)W^5IS5W&IBPtn+o<9-oUOA76^)5H=E=RC5Fab zVr#EcN(>ml#(TZ-1}IwF>=0_p5zND-a}0aQkzAUsu7wvl--@`F>VAL33{%4dm$TcC zf$&Xh?PDp!uKDaSFlVhXD$rGSGlHwf{5`T*yQdYLFfM2YmOY z|HOi32IPUR8Dm$`z`H`fOSsy54WBEU_%Cy5Vv{`E1MbeL7+QCUV@-N%kKuDQy7BLD z1g}Lg=Rwon=YJD#;0vs;Zc}GXaT%>!#8_KT0-GbF zP#kvHA=OyW@887PH#Wtl`b^0v`3H>SSdvzTlIdz=F=%y)f$`L2uL*{{+pqta7Z&xP zKans!I>-M2Eum|=F@$e&0g&F<{zt?jwL0tKllkxwYUR?h1Mj4b@@QnU;Q`0zhR=c zz9Ym+-a@oSW=Ij)hb?d8 zw3||Cx5qp|G%RQFw;|E{W>Woqrss#ZUh)=pi63L{lhzLbQl^g6629f zGMX~b=LdhGLwg&tVM0sh=^`3;35{xP8v-O@DPp`6UVf&K4NUuSZ*3)vtydqY)@iZ8e)c*JWG>rKJ0$T%d}gl&u`zSA zW8(%weP-T*sIIn-##uz%gWBTv^3ObM!7e2m#u1ju@xQqQZ{h@RFqmu0kv7+IedteI`BowAEsmfa}Jmd&V*b9Nl8W zq$fhjdQ-c=q#eR&cEUSgIKegunC^6Haw=@v;=d$d3U3Jc=}Og3r_3(BBUXx&U5DMWM@-hL-flBCEkX@*6R59XO00LS}`s+^~{*c!$c`QmDVQu227wt1B7XG6WYh8gW&C?mpwj@UucC}d02JjgJvEs$A;Hvb8~R-aL_;l z;@rz{T5o(B?%rfuU(b*k;Km=VC^w;8=xG_NYcfvdxn&r{xj;*6dLUtXyU@BtXx4%~ zvN7om%drS;cD!ZSFsyy?6k6$W%NgRFyMF+Y<$NWNEJy1Xms>W7qs~UvXZpj@3PGsG z8}*pnVC@P_yv>>IZ!^R823DZQ!0c@Ouqm4VdIetOSb>%B$hT;1APPm4FA|S-h@Oe* z@%k|9It8dn-8e+mj1Q^fh+jR$f_!iTgC5u_IPhSuh)!yl$|lL~yq+CdiL;BZ#9ojX 
zDENnGLK#Ea*t`<+2u=j1JSJL&R^nsbbv0*w!hGTTPdpmceTSqZMc)bqhF4kib-&K5pU7MlP2wcU==2yC<9G? z&SloIm8&gPV(oaQSLfl#ya87pR$~@j&OR6XzI;<0r((a7r{?#orMhW&w7cjLrxz`C zb7UyZ4?r!=j?P&iCs@on##UQa@m(BC1Zt|e2L{)5PK7ON${8GOH%|T6r(&`0QPeRu z35a7YzI*!`b1TL6vFYw{(@S*88Z4zb?GCk=#;vcc!Cnp}yr|z{?B$DVu!`{H=(yjs zN8xP_xB`nhxWE-b0A=A>tIZqyK-7j-DCIS^{15Nkq0x;)O;jCjGpxIsIpjjvg% zuJ-kg8EZxxU$c^(ON^SXAZnvEcetwC69yTlv3$+WW`C{Uux3*mUvr(@(3W-bbU z*ZrtTuK7?Z+UE_0kX0W=rrj>~>UztM+0hM_`{qL~s_={%DO7wOMF6}$kIxxG9r_T; zfrng@r0Y~_9a#==Wfw^WP@q{;!x}0U1YD?j??T~k#pT3MS=Mx)7D7F7MUp5h0y6!h zRJ#^NWop%ek#SSgp@{P*5EO+QU@kTH|REoNl)iaQi|iV;({x zew3_7HE0wpM>PYd4^_p(l1mFYb*fT6h+Fd&XZcVzUJ9ccwc8`RQLNnOLCJ%Fj8fEr z0LsIAQKdYL>eIA@G_G2rEWO|B(OsH^Z2R*bRSNiZRF9@Tl4K24*-a%!#sB4cMKpF%TWk))1mqbOfk)ylftgwnYqHbA37mpl3 z`EaL>s^KcexzXDvq1ZbJi3_#t=PM#KK=Nq;1@R#s9}0TwnuPbdeSR!Cth!K)K}K?I z$>+wE_nj4P+}B1fhya|hiV_In^B(NE(<^)ZVHA{i`!PS&qsXXm?)JH9&D0-dUR0En zQBb!%02Guws#{h30oObZ5Vhkq9Jb`sQ3f0XP-FtRAQasbkYv@5@1mNGjIu0vtuy38 zJ#IX4y4-HR6UEihpHi0N6i~KCb89|NAP{zX+$dh}4Y^z%l=ep1b+1Q80wchY%ZC9P zZegNeJI+>{_vt<#3U|w>|DuFtcR=xLv|4w_FJoQ-$tBSzR2rbmF3i!ZpjL(l@QNcr zX$+5Fm%RXwkOwah%h*KfKTG|?Ix3~3GP)mC=KUy&;gb9zdhT~AP86C4WN4vzFJ_1V zeYjGL5w4IQRq0)_+lvC~Dr&F?bkxvrBR`Wzr5~zOak_M>vOYoiD9!>n1E2#TpDV0+ zB##onuKSP*ALr~-P@f*3mvGPodP)8PRdb`ZJzlE#eNIJD=5-WW@B?;$0VvXrO~>ZC z!(j}-wN^kLnq;qr*WkFdPyn?d{63Ei7*ciBh2v0x;9f74av#T?-iotAUKuEYIl5Fg zI(x%zEsSCknqTz-ygYzmlu!3L2{4e(0L9|nzCbtxd<3|nN~t@fyRrNLvbZ~)G71s- zLMRaL@k#_3W1IqDGB~S1Sl1PwECY)H8cv+I8_q8kppZNUwDS+3Y&@V1 zk391skPFqRs;I1u^YZCH1mK-J6!5rRG$Rh%K>7gctDxq*s!1MQ@_C&eze^8=d~yf~ z?Uq3}u?KVt{kV4w>=u<*9S7JgG$cttjHo;1JPAb zXq=o6335OIyr7r#OLFTVvL0+IswDXpHyuMS^$&!ACAuFw?sQ5aS<++|_DoV0O#`Wu z0-*eOCet}j_0RxN#ZW*AyF6|k+b_#bJ?!=R<~@LP?2sIG1HW->K0+VlbHD(gMj#Zx zYUw~g`Xmq9sb zaHkNRLWI+#5X>?2vJ8y!dcjQs8ekR-!in-T03|oB9s3nslU0z(iXdOK2A1T*=Ya!$ z5U_ym#Dy-*DdhH3q9<2C!9h5k9t=Rf1mLsZ1+qr%{*a{lZ~{PUWN7k9z#*_PcX%F@ z9`J_&$Zk?`mJGCVgTjPd9uO0J3Upe-+7%4<(01qmc_~aYLV+uwC-s({U_LT|H4Yd1 
z6n5iN0heE*ZPmef6*{$4E8iqd54*AT1g9Py_^(JIk1rfnTqwQb)jR+UH6)`#GDfJJ zWPmGxWdgc2z=hN43xk*uWpyeNj#>8TpdKirMaM9UTn^y4PTBABg+NYJkR<>C&cGk= zNZv58+lR7hpdEzW7~pK0rx7}&0x%Ij3UGnv0`tIJz>-N`2zdg!8=M|AE=**PgOVh9 zAS|dFl5_e&Ch=Ae;M9v<_4`#1gbBq9I97=rP9uQ>Km%pIB(LJ~x&t5%5JA9D0DW$! zJRfqS1Ww4K0U;!b_SH8?Bh)aET~a`yF%_z46ac*kX3B^PkX12025b8^#=SJwhBZ1 zGXAgmL0a(z{s9%U2GDr@n&JsyJ%CHFLKOh-c7iDQ@#&CHb<(1nJNXy9I*sTO&Dx9~`6#fTY_=@(F6y`AG`#%en`w0a6%_0MrnhON;h$XoV06 z!4Xf~5bQ$WC;@D(9{>n&2cLi_sDwRWH5kxOyEK0m0$82cATKDm7ii=3`aNzh1j%qf zm)&7ibE13|{vg^q!D%ZhCdp1PTd-!`rAl5MY){&%yh_MT-K1?9%27>BPQ3*a2290(RZ z688Z=I?4k2B)kFV=LEWhP>Rg$S2^Ep<~W3@gjEHiDr6r&(H1v^2ap%VD+8&+B{ zw?>EE$ytgMSnGFTm%taDe(Wl~FGGF-mP6D9&%z^b*vl!$AeZ?dBB)^iH_C1+Aa!nU zNcRG5aMnOod?*C&1jr$j=;0C>@CTxTLL4X*g1+I=Apk+{LxZA$Q>r1q0tlcvMWCR} zgI9+Y?5Bz~tGXuyq6X_RS_8p*VT6m@(Nkj!-UF3 z*dElE0}=OtdV&agKxZMV0KFB>Ps<+Wcn2l}$qK>=N~}Rt(Luc+%Oa&7h>G6{cmzeA z_n-zEMo9edoL-Lx09;)UL9|gI#DXWwI!a%=G@_W%G%XvFEQA>7YG6Z< zYr`PGP)lUUb^!={0Cx#M0Q!gl9;BGTVSD`{P;!@w66v5w{&^YdDnug3Jd_Vz!zj+zz4#mNSY2f_hM&cm)C{O z_RhmB0Cgk;#Yu&-g%J&0Aq|1}KmzsyGJtDXiys;?R0Rb@2#T(*f$arwC>Y@35bN|q zZGr#;UJerh;0+uVTv$W3PiPA+$Z9h3DJuL6{ODu|FOqW}Gx38J5k}GB<7{Yc(5xXH zV6WU9o{)Lp9pPz{G0Fp9c zPQ=-vHhEx7K=Cnf6__Sq9s^qfpO#TWQAHI)g^q5JYnWu%0IP)v81O@j2K@)A08T(~ zSE0{IM1exkduU^QTv_pjQ3leZLBjVz%+ht^7tS3YRiK*!(fuB%$rzC3qyUx#-6vr? 
zbqRb?3HU=^NbXRjpwdA4bm?wvBZveBBvdZ{sUZNgS5{#!0cV9K4ax&4&I!AX8UoS9 ze_qH^L_B1^>_9l|0X{(O#g2nW`n@2ckbNP$Ln;NO!vPXrVL&rSZ0JCs$-s3`C6HlQ zLI9|c+(Y|htQ|Tx=rqv9OLTnNAZRXt4X8D$5xU(_Ndu5(A*VnbKsN{}Ah)^>s|&m3 zTuWeH4S~u*PK3<>M~%Y(4(VzD{7-`B0r>!uVF;WAuOH_Y9T0fvv=GFhlK?MV1koPo z$IwJ@{D49beQ3V~8Z%tGQIQT*a{__ELyW{fRFEn!$K zFe1a{IT^qN+DMi`KoEdd_bZx86vywR_sC9A5tphk$8O8SJf5JCDxes)Pgek}HVj4t zk{eO0KmkV9MnP(KaQGi-jE?AHjX>p2?&Qvo}KI0Ex8L_>&tkfF%bhTR}-4t5SG zh1@o!n%r8IApD0Q%EEeu{efPHNjf3n>EJ=6xw%EEMJ-m?Vk*g&=ugqhU2A9fX^k!<@fE4TFFH+=WmENCD|UKxr7-HAo&= zYA^z#7XJhn+(CtaoC?U0?$yH3$bcX&ABZfhS@^Kz#1zRif+hLE%85 zYz&fI5PD#?Q`|5DVDkWtSWN%|4a7x*IN=*)#8EH_C@wBAMcCFcHyL;V^a|i)!QHtT zlG9_z+8%II$QU4Zs56T);IJ^m1_Uj59yA7A4^Utzf}L)HaX$zyvR5**#EsK?Y%b2S0<00R%R{DQp{8Km2Zj zYY-EB2ePuJKvhv-@q&Qo4nx=g&{C~lmCO(cF4n;Eg|1H8dMM?f!_Xo@8emX_z6@!T z4D}F*Y1xA*T0{V5N?3~^#z??Pk}n}8NwC$ypzQI&@c>(pe;$fGMznBw5!(!2r^EUI z%OhFnsdPFxDpY;&Q3z9L28Sg8tmE*6T?>Jb1M7Vt<0?)b;<^)NB`7k0%rJy=uj=KT zh|1WL+02dklANTY{xM*yQ4V>XKtPQQo$zXez{z2jBcWr6TfuZdB`i29OrS6dOR&4b z)S-eXYF?CahZGNym%KBq*=^Z7k5BtiKN&U(;57aVK!%5c0| zJ7JkXK&;CTDH&EwNKUX1!w~^B4aR&>L^UM2=%i=35e!l;jMkvP*h^raQ<42JVt5r8 zgd`0PCKyZML4dM=5tPmutkDUOzz$;05~=Nw0HM$0Z#TqsC)7ha9V{FJDAzN@c8EEU z9^5jVLJCZH5-bCt%dpmf;Spg5pC?NL22dhsoHR632m)TPCTP?U;II(LHV~g624fV= zxd1=7S1Z9XKIQB733)1a$j+B%XpIV!t7zlhTP# zQ2J@ii8B_-4GET9H_%VvqJoD`RopO+Ko5pC0+vDGN~x*w1qfrXPQi!=2Ls4~Q<;}R zjRA6K17F75QBWH?Jw_lcF-B;Z5vU0moW}$3C^(eYWR!<-m3D0wuQl3`j=-1C0O zJfyP%Yv9L$IS+8`2l`2V4b~DY6Q7{W(l|Wwk${qdm%y+NZ3bFAv@_`8_>4kF0ZQf9 z2`eb;G?`T)g%CpoRfNRilH3qWpn^dNCaKJc10n+-{{*F^CP{^64m${P(W?Y&KnF5u zLGXY{j|6NTzA>3TLFuaT3E)r&FUK)!ut|YYL3M_i2huobmP>-A9JtF35tPcBTxf*X zFo(n102{JCkN*c`xEkS-B!M0~0a+Q6EcQy_WoKXjzfNdC105$@(6SSY)wosnIok?55fnoq< zW(F~UWDIQyVz=Ui6A9DORq|Q8=dsHkpgY`H(7U|EyeMlkw{U|(DlmkD3xQn`4F}!U zAeBKF)d7Y$5>PoLvqK^12sb4CFG?6F8SE7XvTJKsEX)1TZMoc1>F6EP(wYOQWE@H~ z>OiIJR3wzmXlX}6KM;2~MryTqS{q?$pXG-o&YV}ayi+poa14xTafjkaCX(5xl5$K; zE2&Y(NF<)jW)7yul%zJ|h@>4!C7ssPosJCkp3qWC+A$r$S5UGXHPz|$O5DK*IighZ 
z+mT7&XIe{T9MC=!m=i`i!r4?xi)V~3yD{N9ygH1LAw1`AQt4INWHy3;9_oqLXfw%Z zB#bifvksWYBXQL3rjLy@(G+=oakl!MvW>ZhfaOdJ>k5?4vV^l-W|s_#rEJBgOLylw z&$rws3OpGPQ>sb_OT4TkZF8_UCT-sqcuF2Nd2soF#l_nX=kxI#J8m7wckRsBzAc(* zdCYbtYdb9PbUa8jwk$u|!S0x{eJ7ulr}!1ueaUvGnV08KnejCxSNdo^HILbK>zwVU zW^x|9HOM-DVmYlOz=HQ$9uRnr9(F7w6q>L6Ps_!vb$-SH{ctvLipTt}pShuGhs4?8}{&?^Z6cC$1qULPph53vDY+Q*1QzsTxg~;^Sg8o*eYfFihYgmKd~b6k>_4+D;ZM72WJehjqE2 zHD=s#*7cHQFI&0Yx`AEyqa?9 zLP6sBp!$g7cj@IwljtPQF#(~34Sbe=C|^{LY6v_cWXy5k-b9XT{@W_w(PH5 zVp$N@<-qK|r&<(CA$4%k7Msa~`MkW0Z}Ie8sxvlSw8c$h>5gfpq?QNSqi=~Tk>dWk zRi&F)v2jt%7f{OqoA%nnW(Xu1#*U}Z&|j2}D$A_I&@uuqUF3N(;7B31oZ zpKG76gi6@Kkuo{=O4PE&BJym?E@QWF|E<^dpkU-vUYO_A?26x7d}4FQpo+v#CBLzD zSfl_u`j&VGyLS>3Gavl$KP02kQ4 z$o0*xufxuLC1u%dVXyBL*5zK#SZb_h;;+@qu^YuG#gl-sl3!bgaO8(pls<0j$_}_$ z$twWNN6!$d#I{(t7rCl2(@q0X0F+TorVw$?*!uW?Q75Q!xZ`w|0cYerHUAe?+ zD@-{Pvz0EjY_;VEuCVN|<@0kd9JatSbh9f<^Hb*sel>IamqiwaLeu?7W^3-_A<4R) zc|K;T7yI~w+!Y_QT)w_?iGgDrt=V~Bw8Tnq1Mbe{8`&S%TXy7L{i5YYORnb2mS0#Z zml#jRe)v4Dd-ZU;hJ#@EXV#}C#7ev;N0qhDz|H=LvpeQ&o0}{t{E*`aUh{*V#-Vv=T7PE#9%JG@IpsM z-<{IbY*;&(&c><|*$lWzdcSk0WADY=9gedcbxL^5kv6^=iF5W9QZi_ZC#D@3LMw4d zOu7Vw>fx{2Pm`tHkjd~*y6`}pKC&=n*Ir|u;E3R~CLcDK;Eo;a%0og0+joyeWUKaC zRx;)LmUX3RW6s;zl}}nuW9Rmi|A}=zX!!z4{~@0`JN#V%Wb-P^wmiknCsZ#`TocWD z`i8Zh#A5LH)VBa0=7a0s#wFI*zh&7_xx~Of0+9{5+S@Eafe?RF?$&QxbgP*NKlpcf zeq}HMf#<_#3pi&2E~fzFLvo-euy|_s+>NQ34?xe)t^9!S!hCcgySaYFhFtc0mUo5R z^gWgdtC_xjfq|M6>Y^<6?D`+tOd?P;Az@$$CFR-C-g0CZe8;kx$P!1V8}7rT_`@VS zcKz~oZ0A5367o4$tT<~4k&Nv6ohwdb%|Eq#hD{!_Y+|j?0k)e*%i7t&`z__{je}*I z*x%f58DqzvL##2jRLbpoz;aJXAtn9-9T0gwd^JyALG+DN7J{m08iDdT%Ly~T`60}l zKOAPSK4jUyZE2yD?C|7@4eY9iEjJWRx$P0GfTqVVJ$r7;qn7WL2ofdaXI;44Tnvo@ z&&aiQP^mjy?<;-G0{P$0WuJM|a$wuiqIV6}zv~Z{k`haLZ|=bot7Iu6^}Nml$(4%_loxt1B~nB3 z6TJRdH>CJA;$C*ekF260U@NuLDlSU=Shx@g0&J%iCcfueqQG{%3sH*?L!P;JF?-%S z%|81ss04*%zE?2u;#gufxg9ju`4`LImE?YU+~N=gUI@bwrA%_mD`A{S2p-2}L%9Ko zsTP!h=8gtwx&Vx4mY93Ivdmha?|l6#5y?-eUyyyc%zE)Tut|P!{4%@w`DGh&XOvt2 
z##*R|u|N{cav0Z73k05Ahz^C~fyvf|sfmrV{Zl9~`1hySP60!_%#8Hi1_RA@Mkm z(E_g)vIwteCxvOfANNvj4wZ2^dIh`PVLd}=Z5huXzkktY9(>Fyu=BQBp_0&mF?R8z zP+<4ScDpDWy<6`U&L#ywV2^FJx+<3#CT^U3b%pg)R?)0fQM{|XUj_kR{}9 zYXy5?yY+#prNpmO_&V!)3(EGRJFM4O%|asJ^o8B$r9>97U=GKV(cZznVtpI7vpLkY zDI@m47ct&6HpC`dOWijF?914yA(Y+ZI`>Z{0E{_&@<8cEw#xpTe5*mtx z3oh%qmEom@)Em0?11{@THjx*LDM&HguEFcYuwpz+Wdp5RF^PctXBZbiQ_C+v`CtEV zYkxno9=s-AP&D)J?ZwQ^fsX02jm&W#&721MQHVjUAEkT1q!f>X(u@Su%XV=<3~?|;2kUk!FvAaP==zpkOHj~%|*dLeG#)*xZU;k9Ka z?)``BumW=^3^`l#qkgxa{-#mYY*W4Unr#T2Tzt{sJ?|N?o9=|L{(QZ)OcZ!+m_=C1 zO*OniOyMy2)Pt(t&+{M#hdmxG)lx4&u&zG=2|m$f-EKRzU{Fj!mQqNVVxJ6K zZ_Bmpv;KRDz-!#F)g22WJ_j+Q$W1Ybr8@?LHAsL}FiCazdfSnpsTE>0p@=w<^$t{{ z!(u4=+G9DM7sCx~=GZs>rF5&vhnctbUy4{) zh~_xh5gcsuxOJj(i5E%?x%?;N)|c!?VWaD-teDDz&MGKW--xhaF=O+`18t40OIcRT zHyWzv`g;u!jE{zbz7whM#wi>MAKAmWG$tD(j^0Z2ibkYyb68l*?w_)Lt#XN1TnrQ3 zH>a%+TG+hTTFO5AHS1~YNWywu?%WSq&#;;WpiUII%@R<>$}m47XWAcCaRLQ+8=iGG zB8>&9Z5-Z|P)(07@SaCxj(zw_AQc}r%s7IXLykXQ=3wW47^m~Tw{aFO#AjEFI6C~k3uC<;m3O=eIw#d-Hk=`gERt$M` z5<(;%O5uJgG4z7$B-FPh!l-FcIE{&cq!%T&7OiCyyPQE%;o~RR&U@_}*{>OR3?Dm; zDg^cdk1>Z|_9>i<`?J=Gs-@myV#iLw(LD26>)j_n-SxSa8?8UF2tB+C+DTBy>!9TY zGrmlzwtEW6B}oI=x5~On^rCi9UDm)Nb+RJ@g!=9x8Rohbg7Ei`?Kqcx^UK!tA|Kw# zF1OjY=7+xiWxyXFI?mRA1>6&1^ZBb*?|0M60Jl?av0i57n3Mx1U1Ak@nYBd}fS+2U zghFd4nmZv-_B9~?Fsz`(r#9Equ6YvpTBYN;2)*bkRM7rNp0lPE?0%y><@lXbbFpi!Q# z2*+v?!|kbJ-LpZj??s|5lB2ocR+o;Ar`RD<_7)r^4R>RS=Fn@S;I?<%VzY~U=$PRp z;lq$b_HH2960ffrD)iUCXWeJbz4im^TO|Td7tOY+3;rj5U_4*60eS&OJ`&{Wt_uXY z_%uH1NtdHw$|l)fNNIddtCWr%x#Gm>Ue6fKbyPiQR|8lfj=OIwcl*#Tai~L zq5n)*F7dXXAsVdtx%HPNxz~STeYQm4H71HMh|lvu!6-Dr1yu7uYSKMH%1H$Wpx8U% z&1PLi2lD1qI1sZ35TqjCWAtep$a`NbJn3Gcp(F13mGzmD+^T1-uaubC6c>oExraQd zVll9=fuq)1F(e4T_KmVHu#pRe)j8+O*1i%uZ2j7SiV;?}t9%Fi;v)wt;IW9P zXmZ3fC7y=iA8v3}h5vasJvI9~Qd&A2&7{e=Z$5@=JTvB)(~uU@p-pM==qy?L=pS4L z3GjK7$*N_=g(1rUZ9Aogg({uh^^L=9) zy#$s*6(3Y1F?jeP4?E1BT;Ivv{))9u*zBl9@CTnK=1@5rO{5Z;gV}flzWG=sbN;jK zz}m%K`1C8;Baza*xtny`871tJyKGf#{?0Nxd*$uYJ*@3YaRmmJ-NI~tux`se`KHw& zOJ^QQur` 
zEXt10fq;$q1|~fN>|X`j1Nru?JD2abQ8W3p3!Y*4UIaE|w^dgy^*SxP`8cGUr|dS5 zmHp*+@Q1n&*#5@v7)oS$+(Z`E<&kzc!bTj^h|C}`Q;Oh#KxRrwMU)U6XKCdB4aTjT zb9XGWsWt-tee4smEzahjCWpqwww1Y~t88m6e?_p*4qqf}&MjL5NAw3Hz+U#o>9$ju z<8<4~{}B}~X4jl<+l2gL$L=g^qW)V;9J$pSZJttrmmnnhWFZoS$CvO*gzSyuRx|7b zF(Xupkie=Ch(a(g@PdT-jtDM!%q&K@fHOvHiNMPd=HZufIS%P&QNkifw^3O_cBRhk zn;g)2S;8XO$^dj56(?jr{OvN2p`n>@uIvJw_xb{{ws!W^Wr$;F_BQzBPPS~f?QMa# z7llFaXN$rh4)3vjt&(nmFX0ln;Xn9})8<oG8rKAtGQS9 z+Q$B0foBfj`F06gS8Jg`D*zIusJ|g8%fSFA@MOARc&Nf zt}p7y>9JY7F%Mgy$N8d-CqXB#-AD@j!nZhhn$;WguOLEflx>6+mOfALnJ?UEiY{Cc z<@Fm!4V$7Q7;FuHLxd)#rd%c7m57=CPMN~6)=%8)C_`wA810;sW%JYRz zi(!P*L|*UIuv#xL?8Bm2@U*x)r;NPrX&y`|>@HvUw74gQ-3h$(X`Y4_NTa~Zp5`eo z1>Fg}_u7cu+SiOq(vV zy&&+1+`7(#hy>n&gkHl#pAQs!18jr+{Bm3RoDa-ofg5Z-b`yECK0IFxj5Q2IQecxG zu^m_tXxUBY+s5^w8=fwkr(et1)lZk5&u+iUwt?M$6}a3LAG3Mc1uDqIdA};V z{I3my86enQSoen9`5&|WR$z}Fv!4Bb1=S34@Hy>}Xjkbd4RL}=5&K4&lc!7YIFs599f9%C=uVtYH^ z&Q&(vZdfTOoFEV2^DTCP-Eph!%auz^y+U%phTL^uv#sF*>K9m^%0@~C9W`e3aD?K( zpz36}@!yeveg$nQnM|ahg&}rWi8}K77Gj7;p&6wS3aujc%#7noA%Y7LWR7qmnvKO- zr)oplz`sT`fcX1FF8Q3Tsf0Zj6wWP3`Y2IZEqtFH+*4l1c0N<~CAQC5{%`Dx^AXv2 z@Y^5}o2BxBprc85kZNHDeUs{m_-oQ3v5%2V1j;dVL`PMAuTNAex3im$+1Ij3$`1P} zseEXuFU-HM>VJ5l{Li*iZ*Y~jmQ__6I_h@T;wrCa2R~^$mu>%T+3C4g|Jf#&3%tq; zJN$&TfD)V`oV~C7batfK_631geIX}s0UKlY+EBXV9@{qsUI3;@#y%|Y;xEN<;tm%0 zvF$E_SAJn#%?mh9Z7`5gB>VEcx^Yrzy~1dk;*tHGH4xk%+R>cJF?RQX~sWOpn!WWD~7O%WUAHeV!U z2>heY+P>L1yZm9GyV=gsowpmVvSM_veH4<_5|btw7QD9}wcTK0XIyVB%iZ{M+muD% zRWcT-+*}CE*UB(v%;T_KwW+DPf)&3?aP@QrklmBwvTu6EvL;W0_C95UwUPQx7;EDF z+S#|BvhA<*oRF^xH$>knT@&kiqHKMx?rGb9nRBgw*7jquP^!b26gPz$#X1&zKc@LD ziwo9usE|n3t!`x!rQ>0iFIWYn7coV%%cU=Z%JMFq%=xvlwd}huVo#TtCCadFMSpL5 zyoB#5!#vlsgR2A~H~y0C22tQ;L+IQn+S%AxsvBa4Y@3sftHbUY?n@jNrFg8ai3gCL z6#Z;=lb(P9#O#LX=l$p4Udsn3sA5;{UU6E^`Ul$&Eh2>gI~**yN;ot3>VMmAw4!h; zikFyPX_G-2rAwey6ihMP&^)U+n-Jj2H#Q_U_oB2C%!c{1KlB%Dtl0&=i;e7dw^%Cj ze&dF;=09$0*tWC;S?usUjQrm}ZhO^|yUike&u*4aSqL&Ur^*Yc6k9mODNk#=Vwl5) zxVxuk=;Z4c@V(UK`@!Y1k?nj%C>43<7_*)t?5|pCS}xZ02Uv`5JVm&%gkhc)Ih_A< 
zrG+XkMZjv7a0!(ST3qJY$9lSgeF)njJ8>$~l&GC4vJ-2YFh{e;9h-3MW{)Q~3D;Eq z&q=@*pKM8cUW2?GQ$-dJXiQSEblvo zQ_9Tx-wR$-^Xz#U@I~Sk2h*{%Hp>C-O z_St~&F9e`V$bQB~wgRWF-z$8^ve{MZs7b`9Am31Y1O%eip58XZoD!z>u=)#xnIijN zS+%g9t@7J!Ecs1aDf{~ih5hW(3x$>JCNd0ue>DVx`2hac`71axkNi`ajkSHNY!&piSnCpu=BG! zJ`5aoo^RWX>ch5Ddw$g9<>KiiiUo`IyJnqj4?Fsr^|V5->+dK#lO;~IomFZIt|M&c zdR#1d?T)faR#qdNS?GCa0v9CCs}Yv5wi=;^jl5ZUCj0$pDPp+}m5sAc{-Ja)yXh@K zWR2&QUcjDxtF)9I+*o=Fd-pA&nw?T7bhDRF7k6*OUBQK|FeN)KTkmGK*9ks0zflwl zpFCD4e3R|Fu=H#;$x6qW<60<}@e3?7?6wBMZ#?+XL#1QJgUcF)%h

fUjdWmO7ZT zNoZjQzfgJ>{k4vL^)IDcn6p_}&E~d<6-;Orn%OdkcpAI>cxj3~)hujb|N3xgx3PqQ z7GXO(C04qhO>V*q{^e`JD)#mt>}RqcY%8lOb13;Oer;RXRu=uE{rjx9u5>dSX%mv$ zVJnYD^Gh_rC#*%IJPNys?h!ip<&nKNwBu-++l8uCj$M#{^F!!FkJi{WvCp<+PWQA6 zJ6O%M{S0RB5WE|>Q^xSo(3Qpl2=sIaP3-k9Vf%7NWZ_Npo(*i@-L~b+9OV2lzRNat zzDK{!Y_e0>Syq?@`$DHs%Q{oS`od}4)*+n9UX0kbEqWWeY+$#1TyzwT#do)|2f8qS zniKo*_4v88X>7@Tw_CVX6urLjZp7)D!sX##mOd_oCeq3vc|;e>*xbm|J2l?YQZR^1 z=ZH@TzCcSHWhT_Tz45S6-5y9!o$R{dMn`XQa8gn8-@^-bu-B`^Z;G9n!Qn(U-_Gqp zvr00F>H)=aGPeiC+Xh;O77Oj%7>SG?r~{nu&tE;?U-}JOJk~JFj(!u^VLFkyu}G4J z`x>AO7$!|FRd=-ywbo_wR!J^OO*d8dCQy9AU|L*%Zj1=`iwz9}J%bwC_}|MaM1S*K zNS$OyYZ0EvIsBwD5cJfsBfHUX#khb=WfPf5f0kW4j;MqIJrW2_u~jG;%JzT7zCvuR zZ5ft_7>}ZfO>_+;d)dB+g|)f$6GDAS{zE**kblUqg5LC!uv|31gJ%JD-O5stZI1~X z^B?0Q_{Wg=-AWOb=7*^JGS_97?W{1rcJyCCN1i`x*&5dM8|#MLrEx(n*=8o&{D54W ztZN+d(;ErlM!UcZ80T-@@y+8EjLmxrhAblJbHOYs4XiOm3Xmrn(YR0T z?m1zf$OjA>E;-&=XO$mLYrTA@wBe*}zl$V9mSpTOX zzVK1?MnDZ8)Xxwff0jT4A|!8rV%ySEpc>-VcRwTCY^;I3_*EgB``u^3AOv2I@?=*n zFGxxH`eOY+9_y&lCF5A80@Xv~h>6Cr2vgbCshDx&U?cz@!jI<0o3RLU#EUmmRFE+O z$XOxs5kdCG{Ia#I`OCuBDwmj@)d-B({gwP$a#+j5f>|$<$D^8}qfsmqc7#PM5XJ+Z zDW3uLJpKp81e3FLV#aAyw@i06aBb2!h^@C{4SbBgDAl{c2H!redCbBAz4?zKCFbvB{LALzr?2yR2qDc=!Rm55^@Z&x=8J}e-M5Nqt1 zjM7bof|en~dK#NRPYYM5v+=$mnMrpqXm0;-H;gZ9xppAdI8|o16hvF2s+fQfb zd{0PKEj58HQ|5paPkc}Kl_2sW$_0NC*V>H|%EiJw$LXGIHsIz#+`~d#i=rf3$u`6c z7F#@?`Z4Bd_TyyQf3I)~3Z|iRis9zMxs}K_vXqp$hKb>y?-kr7?26CXpUVB@KH-Q} zm>I2&43P~hudv%ZQ7OT?R*J*W;Q1{5HEwq+!bXo;_uD+)eiY)$Uj)qS77smyNt)dd z7+)c_P`_bf9oJMXH9;>syc}Ct{)jMNlKbSN!idGJY+Y>Y;G1dGwl0qQ;UvYC935{Y zNr8v&RSC@rWinfIJKan}us|O9fNld~d1pTxdO|p`ZE5*|4aeQFCjgWOkU#=4IusaF zNH&4E37%;J@e#O?%aceL5frcq$02lQaY+wpCPsQ-PKKcww$2ff!pLY`Z{Fa8QWflM zVna^~?$z`xk%TTl+7|L{q}Ugp6#lhzAlTjB)ZVa{CC@2a>x>~O^oVimH;rJPDa}!( zYTJ<_gIiJ!TXr-Oix_6@^j6mQlyL8S1W`apl7V}JD&mu1^Nt$PMDav?7g@Y1w1bPC zuz`;yvQgErha;gzG@&3vUEbzRQ9}+bJ{3tN;`APh7fEqh6^7ke$25K*vS>UMv`*1|}P%@9gw($=1M#T}MoF4fJIHnaI;uM-?wNDF`xqo_E@QJw>jtI|-;#^l- 
zE9%)7lZ3hIM5D7Ph}~rP$kNi&&VxC{fw62tM#-d)m0l>QZPl}~VKIT^vQ^M~{B5%a z$=kEL>@+s}GQgXUbuu22Zw}_yo%rAw`{egaD?~o9lkI&)K;-KCWQ{IL8qFhDnd?>I zzTCU73LC9rczitOGH+-Zgj-Da4}~TQq9EkH$A$Jtb3bZzlIU{LD(hyOzqf9R9c#6E zm~#{4b&{RfMQ;lABJVgz(<{gT)VI^k?tD||Ww-i8|StD-_KXNvDw|0470t+b$mH6jNp?K}$|eC@KxUqHoxg%_NE77{FuzV~J8=RX*ozL`(WU{j zW*t=>9c|l79JIiSYaY_|7jnqx0yHXgN5N8f-<3znSmEkQqLv zCLHO65v_=@F_dc!g9wB{$=KYGX7lP zsLQuh?cTkcCt5hE*X~d@5>>1C!;ujXRazR9W(P?0E_#-6jO1TDKWe-g35YvQ7^SjHJvFJB#%ModJ z?KjKLWf%8~{za{~_re;`*(dH>TNH{*u*CndTl+-EneSlL!ss+LEdFYZV?=`RkVquc3p085N^$Q>*d#$T7a9-k7d>YneAzVu zw>Ow{+7LMwI(+WKqGuBqWeSdTPM;Tgyz*hu1*0H7hu~w~(H(3fO1 zZDc1?FA;a2zPKC2vN$ryka)$quM&5i<`{vwHAAuqXJW>=(}!NUM3grg(hX3OlDaH< z6FYvD=wSN~iW|;wj2Jm#_-7Xmm{0ebgW`?~3hhqPSEsb->~4_YMLW&=9X%)pR}+a{ z{1ItHY-6<_6RX!OYBxv{dt^l1e!9aEjHVMiL7^AI!}%)MDg~E2zI`XItBufKU%42C zXYtoI*72RPwHq8G>5P_4kAR^gpHAWRtmJC3B3BU-zho8qx<+wT#ti%5G=m4E3^(+s zovnFtf%d~rlQ?&Od^ zp}HZ_8E04i%&?nr2lZ@SpAtk3LBpZVO=$y7YPf%qNF_+_F>f2G=LKDu6Ru~I;-z9B zI6USXAj?99nC(l%+ZDRivX*^*QhY(+4=LpOLdIP}W`_lrP+MC3+O~Xoj`y$5VPrUv zGGd+Ze$;9qs$%6zuu=r#FPzDNIdMDNu-bM8i~8Y1NcnAh+3Rzni$_}*+$*K*N2_g{ zRvAJ3q+%FO3)bnkoo$SH=0jqzAsHFf57wv0e9DR~6B( z6KLPcAJ|tB?ORjSW%V7nB>>vDuBc798aD5OH>BtR{2|r<8&Y`uPr^lX@!<1+J@oJd zI{C~;#m&ril~{E``_8M7BJGQph)581m3Z0-WQ8l9?C2%pCU)yp;`)jL1ag8>6;JL? 
zrV?rqwcE~~xk}t|!Y8T^icSE{0x5Q!d%}y1M<(IjxgDfB_BZ18wf`R|WBJFCv+z^@ z2*=XS&kHMyAgbVAVW0e4u%p}mRy=3%QxD@~-^Vdyj9ijOCb#x#@h+?2_mMZDIFg8a zKxT%NaMFmXTcDYT#g;aGdZ<8LIeq3~uBe(*-?KP|k78W|!`|_lhCIpS-8t2bHcQU7 zLii7F=b?a^(Rx&HE%wP6PK;=#(On?MTv+svO}J2AzK}D>RA`%}neh%(FfDRq{O+?v zdG&s8T`#4#!2K(iZ+f`ipuD&IgZM)JA&x zegt_nuQpD%*!=qWf5Ys| zepFy8# zbDwk_Rg4YA-Lt)mrq)xVbWa!30#ymM@#f*Akxij^-iT@T*Kl7|r(#XCdA zyBy|qh6^4D+Feq$EyH%b{xNZbD5eJLYNbUE05KhHrxMP^-ZH7LC*5LPM-}>;X9EK# zyY>2hMSy4YTR2+YS!ehEO8i7660$F)uovZ5Iz9L4XT)C$!e~tfSCNa%>4KaL1SLcF zHmS*>`^3iut*6yQSfNHA?v;#88pT_fH&mCJeQ$dK@+j}y#jg82oJ23ZAWomNq+()B z+G{^8_r>3dx0K`>elLE+BFxCQ zrT0wqOz>-mhD&Jdb%I8tGf((xc1B7qli1LJ0cLpv(D^>a%!+H7ajzEp+#BL=a&VQF z+k}ABJO-*vncmD9OXKZBv4Y=}BaS-WSwCPHcvG>lXcv&1CLHMP^_qz=7MyX&C?l|U z-V*nTe3a8jcwr7p4%p8UXw)!63d?J@Ev=v$`{A!af`m6~*CQOb^OiMH& zGc}1hbuHUHATL5JG1=w}HNX>t{5@iC=WKrr(VXPpRr=-vorpOr_;)FZ0`uGK`|?A0 z0tIu(0w9~?$a~L@+U&oqT55?mcKFK(@B5=*?<-+D%WNyzr*0FMt&5XMmEy4x7UCa(FjjrN~wKA{_kLqbq%}b&t<1BZ;Ej{ejFLLcd{ep_OJJbcX%3c^D&ca-8d! zhiV@|X4~!fJEUclk)4JqqhJspF+1?A>}ec2a(gU! 
zQPCUCJnm{FO`lgCp;>$%ZZ`}cq4#JP)GJXWpwGwn6-|g6r7?$)HjgDQo$nUna`gPnemAb8J3j%(zzFCCxJZq zU>1Xoq=(!PKY`<`2S?dvTflQX8xNi10=B@E`5#a`$btZz^G* z7NN}F(N!+7icR(z=6DH4@`#L|_U-)^^B;2-HyM;G0mxnZaMPN8GEc`{87^+NXCYvN|n?7JZId?sL@*=AInMHS8WEjVi^_f324+e*&h zA-izaQg{VOVjM|Oh(?hUfPL&dd-G1t`|{EP@vvfpW26O{VN?*d1-~3|$+vzcpVxWg-Lrn=~?on0Ufa`o|@l56T_GW8(86 z@fZBx<&HVjZf_ODn%;B-09qWvA?xy7-J*M1TyN^_8=m?T41b zbB6$(5RwU*BvsYbM+I`sec$(xkg4jb?&-ODW_oUfz>2FPBCN`*yQ_lh>UtskL|X8~ z3)uAl19+_mUaP1q=(_6uRQ})Rt?KHYB&h%0bzOfS`3O!|z4g9zyzlcq$M^X@@Q+ZQ z({jVaa0~gwRy$yj5?Uut*zI^qDR0-ze$wK-`Lf@u1~Kc3f}i|>R_|BYqi^+wz0UT= zLPK6D_nIrb_mz|^5iS3A_V{sch)rGX4Ytb%Pp%H|gsxJ~Jrq9okb3T+$U%1WZQcvZ z^o11r+EHlOr#>Kh+5bI?fFLH6ef*vy-gZqs7+F0k#ytB-_?#o^!3a~2df)a(dWc&7 zT)6+sSn8N}-==dP0=G;z7-ml%^UnT}?s6Z)TH9W@Reb4v|AqH`=~3_gGVD=&@#e62 zeN-9gkGgb^{a0z9_|g^X1{Q|^KPJ9r$G9Eu92^*%&o5?fm(NaQCL^|sgHvW?jbyT~ zIP34S0W4bHs*Az)-X%IMz?X+24ZlYra8yNOw{63+EgAIwUq6-ZNRgq+?%CwousKv= z2fVme$D?5TAn;h*72aK4Fw%I?!Py^35LP&Q1Z5+GSf3jjBh#78gdINKcdogH3rKSE zj@UIr-il539dMh@Wz9OshKIe|8@QY(vQmG?zQZ*loiImy8{_F5beHTmL*8%@P^r;_ zNrdfZE0{d&-5TZUue~Rci6?XI52`-&t$g5xYHo#&9s#({eI&Xw>8#CdZ)H*E`Opr( z_5kcMGA@gUVfX0XEgRj>n<@t_-pbzJS;UlL`vUG3dKw00-FJG^~VmL*ij-gKL9Gdp<7`#rX4#0wSXAz>T4 zVh%9)bytWNvv(U1FiyCM?)*rFS{cIs{wl-$ZMbUC%k)>%kp`5myrX)g*<2HQy`Drg|6j4ofRHTgLjr`rN`>f~xItnjGp)~mf655~^h zf{vc0GKQ{-L-qN50;>Hl65fk)kV0Sklh+em5rk-~F;LZ5?`E*^xLk6fbHH@v5!V(L zyN8!2iij767Y9&{*e#rt&pNo}TIdYCEhk>MRG?u-f*4w|FZ85o+EflLlx}01Hx9%A6i?u_`KSJ4PAAW z#`m6v6XQ=^X=G3uznpvH4c^a}v2Q;lyolZUd0|bpX-$Je=SV0lkvMd8wfs*TwUqy5 zZ|07hJWJQ`*TnSQ{?I<&Po>iVY}U2 zla5|YopWcut@>nn&9)wr*@qo>yAA6@DL5(zXa8$=Ffx)$ztnqKsZbm1>8)euWUw?v z>LRtP4%56BYulBvDqb4~Bp!(RudK>gy6+}Tcc%$2`pBEW_N9ceBd_!}?x8BXD7%fS z+Yv*RP)Auo)nXlmZc%?Xj*8f-8b=Xhn;_xAS9;4ze%qu!&0od;)E-;dV!5Zjchq7p z8S;jHuZ8iAoBzBpLTAnL|JuSJf5XX6SO-w9{*dd#(;mqJUGqL)b#ci|alvd`9$p>K zkMzc)gDTHK6Kb2gs(D>MhcV`R3=6|)0ET=ZZtNCAulK%kFN(`c21<0Bniffhbrd{D z!E!4J79La&$8i)d4@V5yMi}gTgLk+jhwQlak_*TiHf~wdu->wVLsjh9kAj;{(*ZtJ 
z=PU|o!mH>ScNAnqjtHN=bS=28M)KG5fYXQ*oI*|(=eTmhEVKI5xDz-D&4FDR-5~gZ zU=IpM@&G7xM)5%yfb9L7JnL>hLfWw|2X_* zvU}fHdhpJZo8Gz^x5B5}xu!lWc3tykRWz6U$>#M3)i)xF-m?1Fl)kG=UFaDacL&g> zM^Pp{zj)>~-(XH2KEFCi)pd5U5B!~1+Z(rZ=qD)bj>7C}A}$*d)Uvm7S?T@1Cxko$0DCaV!9zt@xYW zCc;~Q8T!m0Wtv_ZsN9vG@a*XxVTU_ycX^%T?6$Z0D%oAn3tjA+y`|51>c)+F_Sj&l zOQXzH=*+y@IzLhDa1%2v!`*#0+r6kqn$rWkgs&qoc}cXtyUTlA;LX^HN6RkFw?gVO z%6KZMGOH+YSpkv_MedOc%umSuY;D^6sTY)+QTF-8-zF_@e4qESjpTqs_L&VR zIagp(8x~sf1~yScTb}MZdqTeQUhgTtd+@B;C{*?I&YwrSo$70?wa9#Pb1S%ZTV171 zwKyd^y6ajjwV{YetLuKqP^Sg=|2i!`c|VM~L<~9ZmJ!k8x|+~6S(hv=tm(31nsG@p zUp*bUwD?1A>h4tB@!K|Z zJ`s^O`$7}(Kevbf4w?8TF$Kl8rJ?YE%j&{6_S#HW7IjIB>acUC!eF1&IpC7LT;hq& zp!5fa*cO8>fkqbC_dWr?CT~AzLz4Tbag)nuA+o7YdaL%vqFU54!@6uG&9D@KTOp=I z6nH!%(TEXGDn?A#ObrFR+aV@@{7G-Ncv@yY1vLel=2yrVp88XpjH7>`$;hEMtMti& zM7;h+?|ydm!-ydKR-N*9$;K?DbR1tOyTO0ze7*{R z?gD9vIBqqxHm|aAP}WWE1r32yf2u;j6lw(f302fQXNtCc4pWq05V5ByZyaSG{2aD# z!&1X)Scx04h;G0okc?Xrip6R=vLh53MBkbm)+}DUk@bAuJ6$|4NwH*K{ybjwpX}uP zpO=(>!c%ilU0$Ac=OI@od(G;u&ea~-+C<%~O5P|ti$mz5;y#rBd7i2H?k{0#@(aTD z)Z~q;+4e7EE0^L4B^I&NxS`6rW@#{YqLKvzWGy6YZhIqW~ zE8g47#JR<`;n*sGh}b?{%b?(CsA^qiJHHdW z!l4|i9bM2e43uNTLN?JiFi-&Dm&O#k4EKfKf?~q^ZRWPz_1{9_VBYW8D*7Ypy4Pa& zehbCyChdMvCvrRc@RtKVk$2w3%D(OW;T}+k#$*XwMLZVKEF&JJ5EnxX4Gi&I<08Iq*8r zaTG1JI6jyxe8MGipG6`wOXh=9S zN-twDI=*|I z?|6P7N^1~0`A83b33TH~0}}I`YR-V1evgkeN`Xer z>wU43z0q(yhS+Eq=Rd=KDXn zn>YJ{YlNYi#fGVKQ?bL%N_@c0yXM@8<1JAX*Mu`VUpM73aZYI%A2?5+O9@)W`BRC_nHEx%o>$fZ9N>gy)cvYR8w6SkuR z{WBePtH6hCU9I&gH$8$+g6zOpGU0k~IQZ~F+4nB7xvgGREvk~?^4s+HOj+G6tWXqq zHrk$v*V#l*E2?TtbWu{r#$3PRt1HR(z!NW=ONkXobgnAQBYx?*A|i)aIKE=S&=PTY zZgfS~qDe)K;v>@58$&g>tu7LQx7jq7a z`KVV^8|#44&wnVM4$6~Io0}H8hF8^1Q0Pzuc~l?T-EzLnc@Drq zv1@Q(aBfwN7@o#DWdIKAGJ5hf*2zrI07yvv<*r4GIeEXRL?Q&8fLd3}o>xlNv%W~l ziwH!)r{|?tVS4s-Hq9rOirj*k*os`QvTsmo)JRzLimgivnh|BZII=Z9-xGC8Vbydl z4AsvuR7l??&PnYFwV&k~#{EJ#DrLrV;Mdz%k{^Q4U#EA@YQ;OY)yLx-#BzamPMmm$ z*{hzyF69hx+a1C&5B1Lp7`X@)W~qNW0DC+pUTh1*ExGa$h+foZmJ9-9#S%s=tVAHW 
zBq@fJ$Ea)})CyEb0#J`=oapzrFhlpHz;oi;#jgL6Z;;&)17Gm1hOaC4tl`^TMsjQ> zcT>{$oXEtt_`a0;Y{qAmvd-SpW4YJP`o0MYxX$j>kXtT_Gs5aQ zs=d_b6Wdbri~VD{sh1*$PT-doc13|-UeJ{Uel<6cZ-2tS&qJ5m6*C>pD7WL~eDkvO zBx_kAtbNlm8pCu=jv3e)S%qaxB7OL=B65b{Uz*5pFhSrE4-fEhJWxqUn(YR z`>TeA@)LsQw%9z}HgBxt54j>%993o;;A$((%qPX=p=GPf#q6UgGH&})iKXI0HrI+U zJ<#7b=t{K0yw28J$ax0Qs+*?3=k@UX)ad-`o8U}Q%h~4Mw$&8KwAYIZGwHsnRME{{ z&WP$*h!_o{*cU2z^%K6;TUDdhqML`n^9)9_)3w!LY%Vg5vrV&!YS*33C;P7+z+@lI zWP2uxs$vUW+GxC;?Ia1=)!HXZwMCPEYOEvHX4o0{$>B(EXNz55DB0E39dEPg`dT^{ z8m6PU`#wbLf%k69JZ?MKopa5QYzKjlKsgGBZR5cifQ$#x8GwsmX8DHObPW(L^Mv%O=>w6iH6P?`~K+$Tv}noCzQIy~Gu! ze@Ak(lf7}H@8aCoKH+O{gy~;DtzeV0qrTiHmXXWQ#SVjy=s z>r+d`nFT}XbY}}qKTmhEwVgd{@M@0kSoXSw7D{mOxnNT0*AEK6$POfDXz*2 zHw@URANo#n*_y*n>j&dk+^6O~^JCxD;1<$+Z_0i4r@lEKNz*HH-}|*s{9h$akvdvr znmV$TTd1p~DT6dk<-YClf3w6EsX2($(lPbb-js#3L0GfMGv zd{V4ScBd6rwvr$fsv4K(;;RHBUk#i?1$4HM(2i7d=d(CjqndHIY#N`>jq@23N#4c! z0jE`&U#f{FT;q!O%x$LT(Z&9iRb(!_dy=JMYI#nbcU2QU*{fTVedem0Orlyvx zRRT2?u2TA4>I**8D=zduBrdg1*Z1@>R0P`2F4*Dsiv8UKL%lr(KZn(x?o~{7XgxHD zlhmNPucLn!$}SbiuI+1D%C_0{$j9o_HA)ldA=}xnKMJ#V#>&JN$8+D>;U6l=k3-^7 z;6sq^B*=J|4LHU zfVj}&MnqOrU5jXH7*&~+WEfVr9JMu*l3o70g*t$Sm)X6${8w%+N@Y!^XTd7UL1ej~ z?(**`+2=wcMru$3MX7Z-cPZ_d0RJ?9?Sz2b6}$Zxm7K1&Z06CCUZ^0oNNAi*gYC{T zfsUV^o=9`G!U0*hJe26b?cM3O=$>p{MxUGpPp5k~l*Ek77J0>PWK1Gl#Azchg#U*p zBDhx-awgfxWqzF}?()0>2iS&Aai^6bBH82-SyG(9gvsG4Sr@blY`@(xjY>({cJd}q z5wLSei#y7}FEm;vG-s7@q-7LzUYpoksPC=;Ax1I3I@$d~`@Gg2Ckg4AUKGvogd4^e z*i-*T^@K^r_FAc)6<91}x6hlSvsK{WaQ#1Rol8Rl1^6flS^1#^ub1)l;%@e&i|aSK(|LZ&p^FYvZt;WH=Lf-v*Yy6 z&(d0XrY&?qJDW!2>Av% zSl)sDOj=-;Jw<#7heK_d+`Gg6va*tF?!8g}=e%M|OZS9a5O41ncITcy>TfO)Q{n!) 
zs=8I;MjldBiJQe#{Ea>rduz3SK~$zA<6!3FE4ih<9})@tdyW_wny4G-gX;X(rX-lWB>B3ILc9Rp3Z7 zqO=s)afCdy&{#etjZ#kD=S6mexr z7@`kuBfdbd2D0qDQQ>%PktRM1xDzEf=%FcVF?V;X|5YWY-5UUef}^pDdNsyCCP{LL zUuh4_C6~>iJerv>kr$Di%}ly@FB*(bOAxrhpG>}BqI}5E4MB#aQ1Ego0eo&~iuk=J z=K~Oin1A$sa%#K(mXdP*%%Kb)n|-{)f3Uy!NS?KUiy}_4F_8fm_yK58)&xCqFMkE~ zI0}Yyyb%tQ#OR(5Gm zo1w!@&*F`Py<}0JON`&sSR&DII-#b zauyQ%afoLciSdbXq^~TyoZ8NNArf`E8~!r$?*->F#kY>Pco5W|6S(*^sNX@Af%eFj z%Hr$c<3T)6LkiKSNKa8`c8Mwe(s);s^zy+c#-|US%zeq^x#v$v^O@f=B#pD(IM3IM zGj@MXgt|V>zdlGg+!?8#GZQIIN1rv)cxZ$#=+(Isn(s8=Sa#?TK<7NYJU43dfD>IZ zAEUxu5Jzxrx1LH_ru`v%rZ-uM3cj%&xr!!iC7-q@*$s2TUiR!qN=4bEi|IOiSA8yw zOcW4Vpo3Ec%RC};uy|-0LoFTTvNB84Dg;l2y<{Brk>BzJoPC^L`>aJZ0x%!AG{_=Y z!jX(533UD$a0-(tWD~iN9sqX=ogJgAar0P**zm!{4@%sby(pCSicbHVKzu^mEl6BO z?B&8U?(oBvQpdXd7Yzelf~Cx9ZL(M}SzfEQ&zA5pI$ z+9O}=*}zV8`8(IqwInj#p?~zSa3T9tm%o+m>-OJXjv_zqx!leVirpnbLw5=cOQ`(f zCXe>1nYk%9Lj=0&P3-z_mqvvd#QI=nJzd&K4&r-c)P?z{et*5lj}NftZY$r!DhK=n zdsQW7>Jg-aDvLLQ))P44Bn%)VEQS09>N05TYp`8}PraRG4dB=rhO_#tVq> za6@9WwtfZv(s3@jZLBf2<^TYQ%q}R9I;3993+EY2CKK zR^(unYz;gjw1vY`{%62$+Uu ztQ1ZrqOg+nB;3+!ES!)LWtND&EuxyRscUdkzn=8}hajrmRef<-S@w|hYQ*LFA*;#A zAHut-VAB4ehdsVr`hG#v+Q$3X`nQTs$8ApZ`)HnD(iJu>3u%R1(SS;y%_Qe0^`(8Z@seaiO$HhV%H$4*=PEBt z9HHOADInOpulFC-=nk*y;EWmi#K}-q{w|$iLoYZSyUhTl`9J^CyM}#p+3y2U`UMw( zy5-l``-cKTZTH03kZl|k%^#t9tY#9l(1hW34%5so4M*8y%cUO>21Y0QP`;B0BDhPh zMa!hpc2Qv-Db7kwYA;tv9ouuN*Bk?*M61mx;UxofmuD^x)Hg@v@DI*AZ7zx~@?NJ3 zMzhoFv9FZ5rUdVInf?8K6lhbZzX-eb)&7uCxy#sfc-N&7IR+I}lB01{GF4)TuEZ=w zF~Si!8O8captQMUBogMXYb&qWbt-N0YvORXTQcMtz!L~8BEij&Eg&P6F!Wkkj;c~Z zl?}UQ$9G@t|CWcQkWhB|k2rm3_5@LcB0qfyDUe`mZ^34K%k`+U@`sv0H6=Omuc47& z#MQ`CNZB|d1KH%)6CaUxE(C=IZ9OTJacW$vbY zFTD1i(+et7!8Wyf0lvVUP)t%31h?=O}yMyZl=2xp%=Y_g~CUz!2h#5_EioX^lD zOh3q;y<519J*$>`*d4F&m9vZ#xRqUhWoa9G^trO#7lmr=gdnbbpIwrkN%wkAY+T)h zzo5PB^J)0^??`%fma-GK2p8p^z1M%WSFB%d8;WqxXMd>-mgc$OCXCOmEcUog#@)Q_v;HT9Xk+hS zH*M(r@*742Ul8hzB{f3MIezHQ0pE7^Xj9-mA>7@V=_6k|KYn*Q@UYk+$L5XJ{8Hlj 
zNv2i;A;_aQf##TN0(=j?jJ@F@|9-J=xOdS?7S*&IOeI&$1@@DN{ErCyq^*$=wTQsa z*)DcC4!msSHRbDA=7yli79a7;6~K%m=L>*c3jiz=&fHj1GF60NGyu^emWdi8An#3T ztm_w;=-+$9{}ZoJTVIprq^^VX{I)tZRx{N^YJjUwh1p}51@0HqD|-JFqy&kArTUCo zJ!Auo#{05OqqbX<*O+Rw>P9ih)1_6UaAEP=*8wxOGk@LPPhj!$>uMRNQswv|e&GOn z{t5s73Nr>$g`^cl@pmjw6RS0eXnfeP!Wx1Fh{J1gM3oHaw(a8P+~|}32TNG^Oa4pP zD-H$9dG(|aY2;*Oz`o5FbB49;xbwg|0;K@>4NRQm@hzKdx%Lr}?!tl`iGUV4c*9Z`7@GA-B#5?9+ubG09o5?tx(f=~oIoCE)#YDfN2baxz8&C}aq~rJ zx_rh2-6LFfHJPc?crz%b#(k*vHC#?{fkdtbSiyD`DDY$2#D2xkYoFqejP82o$h4< z{(>T-zIq2YhI2b4!`*}BA8cw*T3~ z;d1+;O zsx`l~i}sN59wmdlh2qK(tJgIKo)G)%8df6Q&CIWKHxaR;Yk8@L2kH0;+C@A|)|Aew zwh2RdsVvqFcCVy?3U%#CzKdMI{`Iq{ZGPjwV=Gyl4X2{D1Q$e_@ZzA=*v`K8@7P87 z@gg&ipR)^y-to_+9(xzLHR2Q4U$w8<&u;#OKepGB5LyDmO*jf+tHrE%L zN0EaM5L6-&1(RljX1UV9 zrK_6!pfqq$;9arLdjeZ>k+Q&28E9FmW>dNQ#lTxUFG!G?13_jFBFc~YZ)G5RJjT{u z?)@UWqZ*rg)k-W@<8JQHtb-m99i1F^BK!EP7rzKvL_=a?rP)oauJkUE1v!URN5Zc-s0QBR`q3gbYSYtsKvi%WaWMOu)ImPs!dae?t z`dTEOMCTX~&N=7}`J7|BMRqSp)buF8_Q2+W6h(y7$UO_E)`~JWi|LD4X%YUD4e~{&VRCY=JbFe3WRJMS6e0B?MU8#W~Z2W2oUmD%YmWax5NC-Q}M!E z&x>wX0jX~_KYHb}XhdU+RoXz5%_CTXJWth{c85`}8 zYL_M(;rYD|BISyX!21P$lBAVWZkRV9gk+U1Hs015i0z5N1B@iq7}2m;h%l9gM`H@D*tx z`a6G|>8DG#HPOgtjYV$7$`DTwsSMGNla(PmfI_ZE&KI+4z)F7meAqP@ipJmrMQDT# z?5AhSe$Kug^lsUJjK2xMBFGMD0*N}T>MVO#ci>`ofj)vCY|D1j=wfgRSwUJ ziITMTksB`dZhR51M@C^QJE%qG%JS8Lo7he5<(0W-Ul;gHS?+Q(@QRBdoFwEw_b>w` zPtlaBm1$L_Yu&^6I2GWgeC~N+H*1{;Y!GL^zqD-L1ri|4VX0Mt6b}HM^(t{Mc zd~H;p*j;PPKni}NF6b0T<(Cso#uhJ3oC+uc?}F0sj`%8fIelCDgJA$7PbbX)+Apai+2I$~gw&vpCr^MQ^xn87jk*BHf zro}F)Pl0;IhCQn!K#tJ}!We0$12=%_$rOq0&LpDZYZlESb^>iNb&~f(Y2ZR%%b9Cp+0t0(vI+)~4E2b-PO;N*e&W9~a zMc^rpI!nm-f!jq=lPEPrj?wqOEbxXB)^j2dVoeVO*Rih+1Ru{0-xPQdAuy%2rj|mT za3VRgsCIa$ZM;a?QyE|4Jnqzyt;r7B_?PRcIPT(NWU$pvU*`r1cN+0VX;ynHrW9{5 z$gY1`>Ha-xJPPK!u%SkfJ_a)+iBO%EL?9I39txjihLZ?3usgmJ+>(3z)__pLM&2IS zp8Mr(f!j)c=K?*#UM!byQ>>-A*~x1f+S_UMR#k>hoEY4P{Q`f}pZIsc5x~25|Ms;k z*tG>0+slv9%Kfw`#)p@TISvzdX3^Gb1E2MbYYQr6U-7BnXS7_!?z$d%*tS0TdKHcC 
zur!s)e{;UiPfa)UR5|-Rf5YxNUHcZS)6`VY*kF=0Aqe_hJ9jt932CrAuf(63(2<<{&cJs|gLbfli((ddOE8st#d`ubiNZ>_e_+%}j-l1K zcyHj7VoGW>qdbMc`GO=eM7bt10St9s4yQIv8S&9}UzUsGf;+PHDRiU$YAp%?)c7F_lB}2$t+R1i0Q@_K>Y79BR@#mnD>q zqNrX|M|eq^W_R9$5^TEbv|^N9i#8 z`pbo#Onnf7o_A+Y9gc5wdgHR*>FBI!RQ>V5)$T5isC+CIl@c;yH-<*iOEwYoi6>Rd z0Cx|#Q)2jUguVIWfh#K{goF_JLB>H+iDBzEV<4UaR8~Vmmld-Tnxt5>da3t_sguV;uoi%czh((ui@r)9LRiS(i>dtdp;PyCOFEv?ys3`b7) z+gp0GRn>zuOgCuk>=xGYSPN!c@oq8uW$YIHQeCpWo)wb0!;${hF_TU7Km_oUREN^3 zZs?rb`G~)hHGee_s{lm^IOoDiO_4Mmxu6CDaL|zA;AVh!B+Cj45+E{%1P>q&b07U` z;3a->d~ghN#hv?p#$V>x(HpGEwD!zUaJ={p4%H4~Yp2DMi4zEG#1K?z}3l z5G~ZqsZ(~Q9oJSJL8~zR*+9oyeRjBaQpztVc6>(MwyCG4b*8x|f6QKF&1~n7gpGn# zo9$89*Pn%=#jm7(j|uz&jvOa8HZg3s!mDmY+_RbDo|*x-1`r{5%}A%#!wP`9FdX=hGN;qzt7m*RHCOyIFjNiF6&E^D65zM9%sa+*=$%{bPqTm zD7rM;-{H3>hoENIJ+BRH0tBV%=YhAgf87);XV>lz2HCnd1h%lz z=HP|w_=7>=b$6Fvlf%h(m+YwAg)WK)#DkK3OnXLxm5i=Al>6M4;5SMxIICR|p%#*} zhS-O<+aq1TrhniMvg_X-tjc|4d+@#zaSC)XL$*)D7I<7HG&W5x^zo#tBH5bNho-aK zvud}jXh)~dw+27u(Z*(H*z`5X!!1}r$*OoJ!=3PS@Ql#4*l5%g>H4_(F`-qeo0{SN z80Sh7AhOy~m2fv-TOkvYT9=ehl_hO}M6giR)=<6R&R0L$cyE1&yNPpSE6U{k!Kt;)ZKL&)yIHe$y)>|Gvu0*zGaajC zi`hfMWrNsI-yn|;+7W>1n(Bl;$bPmz_>Z6gpnmK=+DFK zf_(@tzvDo#9aGzcB-?dqGMP|0!43#UrKE*PjnpLA%`pwLG@L-%fDRHpd6fW401K9k z0aq7;8K%aN@&y{-sA8%zU~LKlxCkr6fnmF$3qY}B2ZPNXVWLT$1Aj%Xv|vh%YaK}* zODQ%;D>;Ns)W&+cksQHm=8VS%tGMh6GaJ5UpF;^r$7ReLG_$Wnf>-SgBb^&?No-as zWT$MzqEWyn69$TR7_chLm;%%i{M|&NotupYw|K2Mx*(I0h4_455hjK==wYuKLt+sRe>*LBS<7dR1h?#!Oar9G0MnT< z1MUa_6Hy&G5lL7;^jk#yN*EZ!d4JVMgRLGRYmN5}*jPx%5^En@tOCP}n4aezT>YO-d^zGwal`T1eSlMeW~9x~&v+6$~EkTg2Ln#l+ffD#deiSD3CfBi**v$`>bJcPry&1BlQOZ!22L?!pnKGzqB}{4{q~)J{lq z6RYe=+W?9+^(;AP3%)PM*cGjiVcbm-?}-?2RdJeumagHcj&WPF;m7UNDtc-Uonv>l z`C$9}3c)EJ8TnyvFb0BMNr#XZ85o>W#)&>^B&dN`V}u&cr$go&&{6DDJGq%Sz) zL$GaT0SRzGgwSJg*TJVf)j(twd1JVXrf0gn1x{O_JWTi_Wnnr@nja~cXqA>?~v zG|~j__p@}rDyhe4w|&dD?$@^@HPzC$bH6j!UdESVv1Jla?qTjNxHE~(P)W7SFD}|{ zn}+$J_H>*_4D-kNs^Z7(A{lrwTle781JVPwc`OeT(=dYf01__1v0%{|vIc!yL*C}j 
zXW?@B$Y}71fr_G>!xt%QK-mHm9g8X;l`_CZ0YgS(>z*s$!Z!Rx@a=0RC#Fwv zR6UhbWjm(>_p-^C1Rr$WTz%}TZw{)t+>ODHmu*_r7&-!nxA2;^+Z^mFy9M$-Ti zO*yG1cfL2UDR)(*d~YecSm)^+g3{;c*l66RVB`Ai}9+$q;U)a?9RQ@jzPf%PqWdgWLF8) zE^WAdnd)HkEefkC#)5wn91vTKu~aKza!V0^Z|!UDPIBLyz>nJoFBh`>9M>a@)|54mGK^F}e0FAWtYe7`E%TQeMJtkov6r& zqS}aBvP={ON!8RXfZ5|fJ*buh%A=^HIiUaF<_Kiy|5|ARxdiqyz-rEC*h_gL^=fnR95+1J&T z2?D>^CQ0I&Kfi$DL7eE2>pkU^r2{VdUFkm3`BC3WH^70DL~RW~tK|rw`aoJIlqd>t zfTCD45CBsQa~FHr7lK#pjfcq}uL3qCtB8`p5eJ(ftfN>>H~~^l_;TGcMn>zA3PrH zWU(iLU4?mi^Ao`#_U$KvM*{Y2=JR>xHwZYr?`MGqcHNV~?vfDG*9R|Q&pa8tl21-{ z=5>J=u}l6E6-GwB7`%+VY3F9s#{%ol_EXdYh14u2_lsAwYcIY=`vx97{j zUzK_$>)Hlf-xVYwxk2Paa;~>{8{(pHu(>TIyUr^=cP9(W6CL2&$P?A> zct(>mQTnE|1P25Zt8uPtSPmn%7$Him*^SQ)K@8hX*xHHUrR=#k2R9+_+6^zAy5_`*?94ne6pQWy0g5~1^6i&W zL<9d06g4UeJJCz&Wsf~wwvpYP4fte=`=2<0c-5-z?U!@n#6H?ofTE>5U#Gl0yW$7I zi_6d>MMkRF6aNyd^tGP4rnU3h(2?7}A3TWw$u)J?vJ>BjJbv_@fhS6;*f&2NxR5>n zbkKjhQhot@@p=Rm{^sf6uh$fXPqJ6A4}CFsAa~}6!G4d}IG|S9HKdC*`{srENMn(n zz?Y06we_#k-1w5|9B*tx!U+2b!?PCM&Ov=OZ56FF-9I;nMZtT3Xy+!S6IN=D+V66m3)tPhJDB2IHC?Iz#;Gs$(nLFIT6c#GhDo#w7yg& zsl(9+H4piHh+|t}q$j2z<(?w*iA(U6%|I9mNdbt z8Ii(aE2_jzP^kc?nd|tEV631pX4x-)6?|gb(y27&&5QBx(y1}$FYDAbW9i(#{5lx( zi9O@3U23r?y-K?6gw!=3Q0vDOi#5zcXT|uXGBAqSa3jC zg^Vobt!ESRD$Dy93<>6w}x4yByuAi&`S2%LhchbzZPUNL1 zw|e#U<@-eyDYRqlw$(e=zMO5Ju>HOKxDA*Lz0Gs6=$yNuvb>KS-B7OVg{cp+g(O_@ zu>>NQ;GK@^a3Dnta;wL{la9lnVquTI1-9>98_Ipq8%^=$h_ zjC9Nl0uXT4+shi=!?Qf|gkH-Tu@ANZ_zR0#K_?O0hbJ0moj1Z=EF54U%`eZo2A89f zbm~=|(9wZ9wRf7O=Iju1WQZrt@nG>+H$SH<4Wu3o0dY{QlKk>=yuq37`aZCgxpaKO zO80Dfoc*RfaH-hbG?6l)AveN<&fD3J79Q&3%}@l4HW;TJZ7_)KEz<%(7BIFIU~OU= zz&P+MLBoLC99gakg_jV?0L@J@5@DaIERS3r<{C;|Ga~V@1?yN2gRn#~!%?E?i^AB9 zqu`7L>Jr(Ija`-bG42{)v?99pfK1-r0w|(c-1z zeBBRg5)$;gqQ3R2jvT0?$z96H{2-V01j1>7H z`Ac8ypPrj6+M)Pfq}Iro7GaQ_iQg*b=y#fw92$~Ui%x( z@}$r^n(faPvTS434XLhP`T*eAZY=kSs4=V8C7EQE|D%W6vh|oRlSE#B;G>$J2H>9W`si?*zC$V=#4P8UcFe{oY-dU1xf}4ma8syT|5xG($ zmSqBHZAFx5Skc0$F=?ur4cqu>vi!z^qi>8Ma_-H2f3*B%g3w~d$D3`p4d21Xr{){w 
zzWjDgb5fkCn+*3tmvQ}Rjt70dH{LjDQ@3&tdR22>eQmJ^oi_laoLCa=lezECmTxTE zi{%^-!-WRZKBD7a1li&yytS}QA~5oSdO~rF7-uKgl8Y^rSJ>NP4#2SQtr3>{Ch@lM z4U_B}JAnM^Z*3pwn9t4J2N;}?t{$9h!9#ITU{2qY`9=U|!**0o!6wdBh{UfBYUyLtIINg+e_)Oh&@ z_MV0E%bK#3D(2R$tRxI%;g@BCtSO(~Q|b0%x6%`H=>?wWRY~pGU4H?0#9KZPe4`lR zH^81s2lpd|>uBgI{`+vKj}rahW&zDmC^f53j~1tsk=q2~*_yC;-YX$ziIxi00V&IF zHU#FNISW~Sz-YoC0ar-~>iX2&{3JS=1Pct{yeBJ0=jM=vcj(aK;^ILAwdIyhrxy-p zW>beq<3`5pAz3;ICzE_gkrer$6rumAAoEbP1$p0<#_Z9&8F4h!Oeu8L2SaTzgF%^6 z8LHK%(zv@2ICtjebg1uoWvFgGW#Rr>CYH0Kk?9+QM^9%@U6USzx(~IFtcGhkyRFu~Yw?S6 zkt>iC2A%MlG##+@!)wsk{u>%1UFrT9OJBPWzXq*XSIb z#y?J)85wigjcDPF3=87geb>@pY-T)Ix!<|7b1o@dk)A5PfF1?+aLyR~{`c}H#P^%j zU*OTQg?Wt^6!$~o|W&B|IOAt8Ju-J zZpYaL&m-Rbn?u1htmDJ*gp9lkbSCg%G;d$EfJPiM@j$4~1aIjV;q}OS-!2#U;$nI> zaQTI+7a@TLY=Anz=WOR&f)^dguR*>NXlWEj2C`S!AM*ezB+~L_>xY5|E^?c4SaYF? zJuEdzx=rX1iMuYWa3)Ccv$z>oP#akcJwBAvP1hWKj?a+I~C%9$QH= zk`Qq%)E+<_DsLzuYGcHp>wu~PC=LKd0dGrCLGYfPDwEp0cm0!TBHkNactYbh=AHa2LFYQup1%;$b(MG5%}bw@Pdd1 zdEHg73~m(dugW@Sf){)Ecfns5IiH>W^yObBTmQD;2G-dJztDFU13TDt4W0#tnvz|D z{XM&u0^98Ca`>~V#KT|fZOegA3Dat71&LZnnqkksyxixZx5(&>s2NRtNe2&&Qx~wi z|FZm3!sx)@AmYjn8}n{<=%(^D?CUSX8M(ST+UuysBdA`4YaSQ%`ISt{2|}Rh7sL*w zx*bpsu7T5lHq1+Ej@QO*rHY48Uww1=8KExL&^GV#qB}#qXTZ0XXY7k(HG_af74?)$ z2ObnAhf{4xY=0eMr+436x`nO3wfrB&+K7^D>0?jbTK;PG{PV)*5^xGs;p9UH;Z=yK z?eVc1&bz#6vadU#oaL1T9QtAZ1Nmm_2Rv7KsMT3%mshLwc@Zs#CKbj9)rt5h#<`O5 z2iZQ*kYfl0>Jd$$`QtF8pr~e2#k|sB>;qyUVgA86$5R_*?BNj!Aq>L+iXZGt^(tVJ zFjqkh0OLl2sf22Wh|Z%v3nn2V9_Y4s2mkZt{l9PX?m7|JHKqP%msA)Mf&&lZ^I&B#}f`p!NkkCyWw5jk8uGZC%Y=Rv8kU5)~J^)<@bZ!#} zMNLAhJpr84KfgJ6`JYvVys}fI4*YLd6)M=z-i3_oKWhmoJ0p&4j{g=dVTh!*u7SY{ zTR*t1w3q=pG~|&Pgcm~P6y!xhphG19AeZub44^60RYCfh3aJhyA~4Omgl z03J?@nGvv@=xE)Hl+W7 zZC@N{PgE~EZl~4zzF?%HQwz4U(cSjNY_h+%*-4csZ28jsP-B~&_(Z$Cp!K&ekJ>I2 zzS&2$sYxK`&f4rF-K`Q}z1;gkiUMu+;!53QYn$UW<^vQq`au(9DMk0n_(s26KaQ9_ zVK?kOzHzd>DY;VE|klc3F$;ej+_Rx}2{k0m8&bC1wxM^=IfT>wTn zJo{i=HcV)G2{O2qIH(>IK!5`Xs7KKrfeBJ3;Xs3q0HB}&t_I8!E3U^BAhb}uB%)k_ 
z2?C5`j4P9l_7Ig|jKXIP)mBCkMXJ^&tMoTUgKlZYD3m~jdx+(^4!r`4l0@u*iYx+n z4gxI=U~?1&1qj_VaU+iUK$=5qPj6ee7RQJQ97Z#4u>!Rv*I&hZl8eH%#7&bY?EyOpff8X$}AB{Hj01zWGkShqb-OzmDDV zBF_Q#relJ7Iy7=(7xMGc=80V+Au9DwbXR#xb(%N=r&0&05!H&PHa58}QTKu=l%IAe zy~ti6gXFwiUiluK1YI7T1s83iZWPym$J~jcjbbn6MhzwKj&j2DLyKv1ZWM$zlZ4}d z55jJe?~wPz9y=ynMCtwSV&zqWkG=Oe-oQ!2a|~5A@K$haYarA#=Ul@|?)JpDIdr!0 zQY6!<5O|Lqbs@y={tJL+r7Jxbv0Lx<_}pCOnd`)D?CVv8C3N3+%_h$w_S7?FTisT- z-R-}aWnSdD)aOis5mvbwEG5s}?cdA%H9{fx`JVAoq@33XLDp6y)PMxjcQN~Z%C~{t zdXImM-SRV3RJ!9Ag2)=L@Eu`a`-RWPPHl%K|Lb1})og8@fE3hi;=%1O4-2ot;k4l% zqJ!+cbwZS#-YyF6%{^Bqyqg_A>f6m`m=Bqfukr-h)REF9fMJBVed6Pv^^Mvmu5A>q zWdpy${Ce(heQ=*N2`voudM>5EHnG3|jqegBH4E$6$|d41CNvApEEp2Evg@Ar&9W~y z3&4nf&ev@}!a$3#kFA~bUBza0_%1C4QOgg1=B;EO-s7+EhxGhsd1jA)C!6?p?}u1# zoex`gn=ri(s@w!8rlOFqGkM^!B4h-Ld=nM1jk`H~d%I9s5@k>A@N8zS?ZWP@NMR>x zs(mA&1Nj$3eKfJh;?N6T-;NvnSi5lXdfuwIo;Ism^j^fg9YSOapNsa?%qNOdAFQWC zP}#>{?b*cswM*E$$xdLVGvsGGWsi$`l>WUz-1Mw-hirQ%FhOpEN}6-zLc<+8q(9UF z5&vAIfJZ*Fm?ie#L}r5ip8!=OyyFp@+#RIP6}GUqzCzr%^?bKSW3b!0!RGkQ*9#smJS!6bTCoRTjaN<2zrlTe_v-VE z@q0i0CF}#;!Y{}nQAIKyg_jAT4$utdJ)2^Js>D@n;1MnF&pC~2YuHt}CMf$BaQ=`yV}KpeIv z_CEw0jF_Q^k$(^HG|-Olm;fF`D0Y)T)e_o|Q~Mb5WOV7Y1=wRGjxt+GISlAE@@b&) z$O@htWTJo;MdTc?Zb#L-a2UI*KS5izoyDwTo_Oi3ZS6pcL*D zIT4`@06;83ABk*dBa9S6ya@2$5UsJ6OyGhHT>uJzC<2p6JqD%;;dLbnCk)&&phE&K zTm~y$%rwadL&_L?*9ZLV?CzVydUkaKpbJl*f%y99+aSJfdYkW`QPK_B;V1-GC|kr( zw&*GLv9||n*zqTQx3fEz1NVvJRSg|O4#<)_J$^C|-GojFbIDkL%nn+)+LkbB$(?{k zxEd0_rv5VDHpc0Uh8L=1h)YnCaX}YT=Lcs%IYOKOTo)TtdaC;E+(Ul(%kRPE)6e?v z6Km#LhXEwuS{bQn!dUgpB5IKnIF2{vfwRtz$qd3%bdY;vs*UNIDjQM98_oCQ5r{fb zY_3@{h8@J*YSm5XPxP7N?oIi)<|cNnwA9v4kft`wHr`tPkg!tQ*xtyK&RP7na)LtX zx0PR1l0nL_@4^xkM!FHVe$59VMc?~k&sKKV_k0_Lh&fT!Q;1uOE4A~TO|wK~uv4gx z)^|0tpDW^}LZWkUxz(R;K_2?-^OzsM=vurFPi^@Z!9DfTD}}AOsFS;;r1pN3n~-pjhErHvUWafA4r1 zk#<419Fj2Olmu9_QU5`Pu@Q+Y$gn17AW|@}X7q%qN}58m-ckJ|6jlO%csM4(q(~r( zHk`mZNyM@ItpssfP-n3OyP@VNv-B@y%HMa|)7m+##;JB;y*b37Q z57$wyf1h_d8bYnx<_Endv1{%d`F+(nLx=MElP+QV+H)1ME$sOHp7M)}l)2DI1!gk> 
zj-V?dtq1BFy8~?FfaQj`^2ToHgubkN*T0EDyV|@xf5(o}=U$aPkE%rK*dG-v z#GQV^nA_TzuERQ^+h^C`=DGCuz2AebuEhzgkv238l=4^rKn`aU?C%k14)kwYG2JY4`@D6UjXSd zz&!-qY!X#wRpqqG`D@}Pq;3>6L>5Jtrs)C|_BoscfS4tc1lE9dl0?FAoHz}TE|d%d za;rrXq^S&0d+1jX(+S`J0nye>WQzl!W+f~OE`=mAlu_OZLHvl&uI7#Gw(!qErehrh@PYc(!-2E(?s?sG)ufU zO-i7OUkvSgxV8vw(NAm^g5wuDz2oQv@<8<1Nxcjj$;wc4O%!R1D_q3Fzc;v9~GGl~jwIY6Z=%XD&R1nxw8~C#I9ly%P@1s-W6s z6T_npCW@nc! zwL{>ZuIrACrcjU7f&IEg?ptf-7GefN9s8Z4)!E+P+-GOJ)(kX{H#h~Z5q3RFIj_t^l5UO3_^8h95| z8IVfTEu`mA^oje_+MLQ;h@64q8-BJ~+o?cRlJL{vz6Z#_t|wyWQWV;9G!W0v3K3|A z9e)mnM)0I}JquqFSnmhQr!o{iD4k?i{|b)Jp*5a0?3F2h`8s;56-zFk;4=D^ z_$Fw~UG|u8pcFpk5yE4gbr*GHE{Cya{l)no|T&3tP*fU=*-AeUw zc3f%SdeNx7svD6N+8@q-&0trb;sof0hH!Bb!Qj@@_=2hP&ZGhg3=ER?I3u88#N?m} zQXvC@1kUx*itNnnT!j)k6q-)&qZ3OwAsv+P5@p`_>hQjO``nU&Ak7#l+=VrAnzANF z=vASrom;|g^&AAY=&XUQ*(Am}6tc&r{@pKh{@LoDK70JNe0)wyNmH7=sXVadQg^}B z@uk&jFReZ-t*kxWp-znmfo*JLQ@Mc3%W{N&XA+G(sS8>M3JM`lD~=LE*z7I*qrl$} zIIbpXGTc9iPG}OVikENn<=+N->^Fjx155JjO9Zn`pP6*Q5|^{iaeuCCu%U}R_$q(I zmfo3giO7w-S>l+EIs0^j3&?kPFx)k2cYu6^9TIGeG$il@MOj6QE72_iwq<1glsKaBjG0koABpea(mgHlBc1U?H8l zC`i!@pxdbh6t+Vd$rbJ;W}g`h)bG`j@hCWgC1^4_7J>y@0}xxls)ltOh8BWw3J|jr zCxigVC#ENX_=m7XrbZP0E(46o8U%3?cUnAL50{Qa)iORm%}3e;M-#voASaY8Q$cPW zJXzq`j-r$a5%mC+2|^zvqkQZ;rCW{~KzSeqB!cgi1pQAWM#>u0F6fR?3+iJ*T(=RA z`l(SARx6ZGvY!mFc_=sV1}GQ_dBpVPq1pjY6hp^A2tbqHFT!zm6&`RY2PEE1r?kwHU?ivbUGupGpw+AICRSowBRTG))2|!aY zIw19QyV!dGz6U55$lHt8YzB}j3U^?r_nz}V_P^hI|9d028ssE1okXO_#?L2nrBess2;N52U%-`EYYFU)g^W`gr{vqR|7xO6T3 zd(ZDVF^%fj=0XmrSElh*w-t4|G)fg^zblnO0yOk8`O36HPXb z)35*Fjsr9R69P#p+;hN9$^{7?r3zZy;pQdH)e18`B*`HIXI1`%9lJk~U~b@=t@5mf=(N@Z~f}{fj*Lko^u2*Tey0=fgTpvPJTG*I2l#>u7tqO z7U_$xXt_1N-g}wo6?%f}rANSexkJh1#n&Wxn06Lm6Q9hBKCv>tvauYmZzz58{#sue zTkse+n-Rz>EJP+GoA**-M2k{rn^ztsnUQDQXTBBf{8F-fVf|vh3gUOHjLxxz1uDbk z-Ou&Nj+Pb_R}Q8J|IFA&=v(Wbnk}q;c1;#HYpa#Yg=@5H?P-g$HO;fU6fTF{rz8VHM$&dAkCW<5=!D$BB`%=%;`fZ{yg}&0UtW1xa zuRDv-Q8t7d0Z*W{bd-3O&pI7=Lwhs3tMM|NP-I_wOd1gUnnn$%8jueWWxXi{2%T_U zLv3NiF==GWHJ+r+iQ%X3Lc+y-N%>xtp0R9BvQSs84u 
zwFA1V)>Zk6fv&SiW7`cP632Hg++jBg_UG z^CgIl>S0i*gz%z1ja^U(WYL7-5{?#-vR4L^BeF2^2I{kfsEbILL+iVxFd5IIvrAjf zOSJ0~tKgN5#Ge-^>c+DGIiYbYMt>>nV}_7;Us$ay&Ccgij}izDHk>R#Tg$q;a&{d< z$zt!|d>ChmoZ*-q+t-bCtt*bh5VqG}UJq$rD(i!`Ms?r`=1Qk?wl?_w%*Gn$L_Z;A zb(Cz7N$x8Z7@eNs$_k&W##Ui4;VjuOMt@3iYa3BUoqk;U{&v>%&FUOR31(9yh*Nk} zUo|{0Wy>+XrSH|?2lOj?)zJdIde+u@@giah+TK|1>p3xqos|}Hx5eq0Iz*iyv1caP z!j(ujbi;}YaE0X6S^PWSMp)0zwp1IkI) zRspI|s8|$QJ3XewZ10kwu(D}uIZY&43g(dWVJe`oO2v?jtX*Mx4)hqUifzQcbc+HT z>B?*kVlCYRVpd&fwnXQ9!EZ^wyv!vL`5LydDAV}N3f;f;lh)_fF7`H-gkyjhGH9tQ zf)4@`G-JC$CFHtOK_31CF)H(Rknv;NUT4-43}M>+zAXHGZex9MX>HEGI99^^0zK4||}?8r+C_+M5RLGb;6}w;9vQ{byix@N|P_P(6dH2FuTS zfpr&YVpiBNyk2;rP@6zQNlUl}O~Kc)WD{lOP>187IBR&)@Lg8F2Ju1;451Cb3LvsQ ztoQio76$M}fXtpVxY_T0eF*^g$6wU_vIyz?ZLHg$s0SoMjt21DDodb2aW?V0G#1y* z3qd(fJEvnJ<{k5)xsY^T6Qs-(!#=%g&j1iUP+4WN(>gNb0!N7_7m8X2Nc(fSB?(NS zdse+nbMbJ+30WtJhew;Ew-;MbcG1^S^O^jB?!2U*!&6V4m6J-i9Y*VNIXy~ops%z4 z((;DXgw`Y@)z;PjFq7>rKU&5)B5oGAT|wb39qk_l_AvDG2TMerl2RG(hms_On+wJV zO1_IOn4_I0VpAIAMo#{meFhUn>99V0tk&R00_w4XKDP|&^CG#)g9+uH+0p2+Sy zjov$3Ki@Z*Spb25@m>3%$;%Sb;r=^wr_KYQGZGljI zhlVu%ONy78X4`Jb9uaQn^wjPuJX-K^h3sc~4^I=T3=VqS6orcev4^KEB>43$(6ki{osy<^sOI*P72fdgR3MN2y?ji;vT}q(;An$OL#y_KZP-zq??% z;gs+MS&sH&wMDNj^~{MNk3VWe2RxJlm0*M~U9kk)G*}qBRq#4*(5$a;wtjb}YZ_Uo z6Nv1Jys@*|A=qy(OV?*fEyi`D@=rdhI9bm*cQHr&h)Rbdo9Wv#!lveH|{bKseh z@uVRy7|1O)kLKOWj!!&!Mwg??korF4EZsUY-P_Npjj=f0f=Vix_?%^L^<-&%6(b6( zRk74t8J-mkts@Km7(XWbm#m2^9krBIh3QRiPJTE&_vd$hNK|_Of(?bT4|+RSB3>W4 zi@e&w-oy&+mM-=#LnEAs9-l@n<>YKwmXwLP3~unSWF=D^K-iv&GzUKKE$nB`$6?TI z%cf0g6&baMp$LgJ1-C@F%zPV$H{L1ub`q{{tUiV&1eGI270x-&EAYDoy$9iZJOx5@ zE*^tO`mIk*+%n5Z0x`cB=u-NZW&YKHV@Ut{Z%ll=;C;;_c7veL^nLma0?L??A3kC^Rz-!Holbu{Zft z-Vmi>Rvo28s*ekdR<785s&WO#{dmr>bqo*H#@}L-aJxt!b4{DCYj9${%E6557xh&A zh_XU&fVp8+v}&;nk*MM}6Ra1?%R!`LuV4HWC+2&S`4NTFh?|lZGJ9p2GBE-j^NKF$ zV5$kE-~5&FTWNB~OWvm@k`YIVc$fq@3!7pG3+7~H5D_gBBfxPcJRQAXwRt5X_5S3_ zD~6545J1&Dh+t&U+vp1o=PHKok}a#38sw0TNuNI%e_nhUY!8n&Mxgv-?zr>m3JuTO 
z0nM*1iHCazQ@q;~i?wwAU=*(UUtYt?SI3x)3V!$JjhfiQ(Gpbz%B9lX6#FQ6HVt+C zLWCrUS@3X8o;>ZC540n341qKBs4zCt74D%(&U}81yGswRLc;1pzAXzM*hYP9AxYo5 zXY%&`rUTH%&~tNh1siwms4U8FLr*~;aQso&%F$^g)bysWhFGVL!$v%O&SSnQ^zVlM zUA8mNP|+cR+>VM80OAy1Aubdbk8I|R*5f-OvMdLIKTIIzH1lj|4e`uL%l3Y$(_5Zi zCzgV-$CFOM62G1(h=hVJr~xX6dAIk{i-rMCo0@((nT8VZ=xEbKp;Rf#D;?2SBsT_@ zBGbCX_=CzKJc~#b39HUoM4^a^C(k3n$0h)N}++3D$0pM zbo~l5XY0X?nKo`cJ(sTh?2bFqEC2E2RG-E8ww{?^X9PUY05tP>=~jg$_BOB&BxELP zToI_|q;McPCVCNHFRENKEYYvj_jR!^XjO1oI(W_KW>i;~wxUY#lJ=Cj*py2Zq2Sq- zA%Lz6li zN)SC)lvMSIkL4&W)Lnp|#ID4o%?A^uBtw)8um4|^Qn@ti=LmROJhR~du}AI~9G2IK(AFsp>)U}zM0Qn&=e?qCNJa#j~~VUo;f9yl&h zZk)P%$Ng+C>LlKLN=}GfNXWFxwrX9Q z&rMIymfX1fqBt*~2*%4#Rv9|!cOK;MU+DCER zB(;ayb=`71q7n|hNu;996Y>K6V97t?CSzQwcI$LNVVTn(l38$=dLjrDK|qW_v^pD zYe>aO!+XYVy~&$!*#vL?O-f}cYl(4l7IB`<*7SL2ya&)-w#8I49%ju+M!SQJZ>O%Gome72!_c>UmO1Fa#P=;PSa&{(p z#*RFr$8@m&z(GaO!Bm|r!AP^JK=Az4#bx@NNE(sP@F}RQe2Um&C~Klncxd?aWX(E2 zy8FPyhYM!u?9diw^X1QGt1jgF%=#71+QNLkkceiIinN!*=E(;Wgr2~Qg%PIU#Zng+ zI-&H8=tLwrY0OWrHV9Rz9p+@W1nifb4JHN;vMpSV7Rd^t8;l>DVK8~c$!^|R=PKw^ zX2Svn>k?HyLJAUGuCM}2eS}HatOMN7d&O-XF19YdD-y{yjRwxe{2Lk(7bDzj6HF?@ zq4u+XNxi|=-%!KYECD_-HOEF)|B+9yA)x>j!VK4e{2Cc?`}7(S@sh=6x{<2LB*hE> z+`D%!iPmtF-@KDw5?Gj7M$3f9?rM zZSuY~c?y5G#)ZHWi3-RxrnY-TGIK~tQMV-!BKscnOepK|DM%{EGD}9#OoUN$40?-k za(Rcc`cjHcFY>IcQi4&6tV-6Q6v!$(a;r|+ckXX>!sbE#xVMTEZ>6_vCXd)f<}T-L z8E^2QMBaE|UL=KvM7|P@&tpz{$~g(!-cTaEMUa(L~6(jBy4Maxe}-uSo)<-z}YD08ajq;7kKsf zGq{I7w@66Ez!O1krEP-6$*w)w(kOL6?!=a7J4moJT-iBV{ zpY|_l7$QS8cqVTx2+I4IYNm3zc!^3;J+1}Qw^UL|?86KucMv^}fpEvNQWU43@53tKebJx0xV1c22gUN^B{r7e^s(rE7j?A@mTLO$=7;iVnHwrYTtVAn zXsWQ%2gv^&%8v4h8%4H~@5L|qY?cot=}_7Cn$H1&6tg+Dm4JRcM#$1U&{_IYF3hxT z;R*&a6pNw!4hVN3z2g}pD|ux#ybRn1vOK(uG|r&3XpTZ*0B$6=b`l3D5&ei{Le}Kog_%hNVc>n?wTM|OW1qb(OscpB zS-sb34U-Dvk4P{X0HRfM;9&ZjAjevkFQ8c}(1s_x7Wjz;p(mEZcwji;@GEzs%aMk{ zN^~K%E||1fGiw4}OawbA`>HrX1{ySexm$w#bZ&O2dxk(2(Lt?R;p6w13L4mVPsw2w z$Af_EEzw>KEzy#Fo}EKXZ+0Nh)%F>rDhVIS!GMGg>oSL2p2Z?h6z9$LBI!q9yEO?V 
zDA*TuK@m=pBa2xqZg<1Sp3(32OMK6y6~R*eJpTW=q!o9FmbRxEEEUn<@O0}V0e*%jhOAiKg3$}&Z76O|_S zm0eYKg(Dh2P)=q74b4$eML8eUGG6u!rM#$yeQr1~W%&Oh8d5Rir7(xEtW312hE$Yo z=F@th^bl9TDYH3eDL^A3-zU?i=uLP(pf`O^6AVtPSIx59Q3UeDye!q#nz1h2?w;Aa zne^E5*n$4x!x}mfrJrkW>etM42jcQ!>E-3#<>i7TIegfAz&Hn=|6tx6S=p!|xQ2Q} z6YFG_9LSKi@}De*B*8OM{$0v}{fO-c!J{l>q+T!0Wl*mg!Pgmyxw-cWFX*iV7j`1> zrWp+qfAbLzHk$tMgJb*4rwd-)nS6m36e&DSM0-YNPlInv+_sZ=T{V6B8xsc!$4Z2* zHW;>(K;CSr!gM4s2pYc^s7bSt`*)B_zJ#lS_NTn+f?Sn`D%@Oij5Z%g+$82WA9*+jh6EqU_}pP@eks3l0k9&cF2jiZfLi! z2ihYMwyp>p3?eL83`*CE2zyc)g7{(K1Rm59#{J|jE*bGGhVd$1W~C^hCZqQ4 zxZ1M^3tUQYOCG*9wsy(*S&^+NMLs-$BRR?&FQZi1pzrEvWso*UVvAtKY^gqoE$}vB zdpWiYQl?X5L`}}%Sem{x?YvQt5QrpHIqE?H$@FXbEEgBab`YJ@T>dUY!QWrQ{pAKQ zB@#~&%C&?UpuIrjNHQ3TC6O33_E)XY8_B?8Z>Pb9UAOeH=)?pzLrE|;^Sk9R7u!=7xg9!QEjDH{Hd5rfrSP7}R>^Hq z96UXMgEG73)uC>0#>bJM*b*nGGf6wWnXk2nzULe%k*7I7;q}|0aKG zt0(fYwtK>!a!;7|mTjHOhtC#5*3Up4qg)GkLejFYA73&Ja{9h4QD!TQ$=>~r;mm+y z#xbc3bw4aIYLkNIamkSB+3Q87Y_HQ>q7$UQ6*|d@26Mxx;Lkb#`Qp~+d`46X{4RRC zLrwGTIm0u^*o6Ul(07F~V2$S@SZUs#)0nFenDUCPPTijUc5{b;lP(_gLHNRGRebQI zPhIax8zh@Gkzo_;A*;ZgsfBJ z-C@Rpemrledbq?rxOposr^>G6vx=nVV^?!d&3jRm^<&X+`>1!+(A$t(SGn5kAJ zI)_FmL>;#4(>SH5RDr|OuB(MhJ*1dox{RDli(%2YP*|eIH`EYtI-mD^`Rkp?a~X}C z)m4fjiGNly3<|sPX<&=X*C{L)JADr^Nry-3RFG8Vxu*V2>ACV;?V8Qkt7MXf_UWH{ zaO{pjYs}8pl#CqF2B~BYeYKd{rVz)B$q-=eXyNz4cU4u?k3LBbvbY)22fs0~SIpu; zrYaToJVfmGhJMm2l>9@8su(TnH};f4&wj6!wq&$txF4d{Sx6bXAR6+ zMGFad``kpI{E0kVm&8<9X@+&=HCW(LR4c(Cd_7hwh7k>$N9tD243+Z>iKod=QG^~G%+;(3^H>SO zK%FCA%?RV6+*Su`#1#b_lUtbNWjRi0=~Z%j(%5Ov2mPxOagVvTAr6O7+3Lbl?}FBS z?2D=9M_Qu}qg8_xr6T*-Shm$BkX0N$e8Tw~%n#?~ktWG`XWc0Cd?+}$h85gjzOaVb zZlDpLt6u0R&5%5;8zyr=^QXnK&yv-Qc`rDxhgRoYu#V|#w;e{EGpj&Q6BBBUM4FThl!+Bn3KvmZ&yO9q+B)X$oUUS zYZbBNTQW2=^qo&wrZr~{D>P>glMl~rSoQ&)`T6^STFGItrwg;mQUvKT%r)k8j0+w3xO^EI8Bh2Q)nxKV!9H$jyll$ACCWj$ zOJDiD109+7ls*(8WuTlZ_<~>lLWv~wO7?Kh1*3%5rO1}0Q}q{yUEY>tBR2vbBMbZP zsN895#ndXj5UYSMYD<;YsMbwdH1MG$eJ!q8`B!bE)S+A^#X`FkIu+4?s9LU+yInd4 
zwPLF4m#eiPk;?m3DMUX{aia9!FHG(mx)<ke5IJgV6as#JnaI#hO*+W(Z>t#b3CnV$MzC---% zbfpUUuT!CQL8UCcpW+scl<4?GbIWo9Jl#crXDF)p8Z4eqAlzhig@*Q$X3A+si*n~D zT;(eq!z%>a=R?s@*XOmQpj02HAhdM+n$>=IWX%@_m>u;FLlrBOpOEUVLYBG&Cz*|B z>bi^}YtU1uR-ZP)7U4jK(pi$zYzfN$bs1`!K~R=xAl2Li>#@+nQtdcbAEoqa($R83 zv^AXBSk6AN3&gyd-caRonvfi2)}1X?NCmYSA0R*CT~~5 zL)wNhT8uaNU&BQV$CrdB(S-PYDO`?U>#bGg-?l@|F?Ju<{bRmfxc@gbEP?+L+MLS^}l;*TI zq^|s8k4LGtMU5bTPeAc1VLay*Ek103cV}zpsO|@lOV%%CSh$FXzyLj6(xObP*OURO zqCqK7<(E5FM3uYl-tK4ZQ-i5cB?|=?+Q&y z%Cr(`Hp+1q_Fv~VS1X7q&n&z}o~Kbso4#>0s-tFdh5mRUjjuYDsNIg6E!w5aJ+PPx z$~+IZPyg?Q$*Bp0LHhMaciuX#)S|zpKmD6KOSg{XOKI@%#GI>){?eJ3UZKtXuXzrv zZR9`unGO1#NE3@6&^ucbm6I19Kh{g%dUx!F@z-h(J@tJ0_AL`f)35wv+Irc2Eqh?4 zP0ulX;Q92oR`=cWVp}%V2x>`?v}yWIKRWJPQJkD@vH5yL@%9i--D*>HyDFPn>=@X} zEh@T4^%_m6jAH9M#tRGjIQ_>ZwXMU&iUoU9 zxQF2Hqo@czoj5Pac}CD(-!x5FSg*tC{=zxQ9;nw}ME?qx?YchX0am#u^nt*C?R*}=0sSYlRc)(i|FB3ZlCaoL!w;Vo8_(X0~~jRe@!Hz&C3|1+od>`5Q`E8|D{ z1<|5Fe4zB`5~Wp*+Zb5cfJ`7oY@EIbjD0?)EpFf~`$(SN>NTNaZPi$wrl2|H=OITr zXjc>J-gjy-ZHH)uOp{5FISBbBbj^1@*%`Yjo%-qVJrg#E=v(xYI^*t2PCVYLEIo84 z?+Z9ld*K*rc!Ay)uRQu}d_n1LqI;zhRX?4pKeyVKgZ&3Cr|)+6e)z-s=-@ZBcl^6M zZhf&TWTjeB9exxwE7Y-XLNB#;lct+(8f;QjKcb10bXmy>u^A)*#noGMgsN1eS3##P zT{#mP^0gWPcPvpoD?xgyF?FCHv_h2I1Qt~mWs6$!C{!q6oo<Gw>Za3DWCiS46eZjzlmgb{Erpjfoab1uGUn34x%(>RPt2C?tCl}O znO48B;fd*pLy{POSD3jE56q?>nv=l`Q~KeqdgdbOY>7<$tHP#I#m}B$!ANY{giIHu zDTySUZ-q{jApbz|$)QtG%yRTaQ23L#gQo=lF!=pe3%G~0UB4_vdN^N^2^=u}(+5W# zIM(8+`tb?~CBT!@P1gIHh8?&Gq1S(K9?d0$BHe=gc%D#|oO3{s` zWG_IZlw%>nY5fv-f8l#1yV$B^N%9C+9~=X>Jf_R5;fnp1SyG>Ow>V|sBc;Rc@xu?$ zuCY^!OA$o*z5E~XdxziLXV8|Y7`9e85?MQ3H(;@L^->0t8g_jZ9s-?03x$qF2L)bB zQv7j+o1xa7TwY=geh0dUmrM3vaS`sb_g;jDzpemBxUw{<2z7;PxeEV8R+EK7^=g8j z-m%J;2^f_#cEj=nh`O{KDw}?DOY3hBAQ?&O_v3<7+I9zvsV28+xthV^iZi=IbCYwA ze$(M}IG|1r7t*3d`74}`Y}%u^v{qMpdf*5K}~MRs|gs`ZYv_-Ge^T*=~=t2p82Z143Nj%8@V3q~-r<;=s*A!&8#zTmN9}wvP?@gwO$sUZeEX z&+oirj5M$>{p_wgc8XhEeMP&RuP8nCSH?ebeM4Nz-Az_v-2@D)(W;@*roF(-3C$Oo{{IvjV`?dTa_rF4N|K~i}j>Jw^KTL*6V1lv`p&I 
zk3BYBeriCaP{Vp}K`5l(`u-jJcl+}*C#cxn>5p{A?${}xr++jyc1N!JkGIdPU!?8# zD%DT2rjg?-rz*!zrpJD4Z|(-YH~rzWdwzcW__N2G$MaU(6Y|NX^o`{`bXTnJy>mjJJ)ORBm~OJa_?vro`G^o>jDr+#((!|A^| zy7wk+$=#>DYMWkVZ8|5>MJu8CQjmHW?0kqQwIdfp-#3w?&vv$a+*Vhaj?=~(9_VhzR<{L8r1@-9X0?Skq@ zg0a{?1;?zAY@Rw3mbNE4a%HvqfoZGeoRtyG1E=}M=<2zvHurovz23rg@M+wCR#@K| zGmFd4B!>^mhy`QD)+`sot?~l;x|uUGqH^}V1%rZUPDP_70;h0{A!Fv6)lbwf^N4#L zX_GP2_g2w&K>IXEjM6&=pyKgys-Z}Efuj5|)QG7fdqI^JU}Efj3)4d_4>LSU3w}m1 zGBV#XW(RWDG}ARYK@QJ+wNv^PvK%y%g>y%WMo%_3GiQ^Hn5#dqKR;v<=SSN)ZZf9K zEUY1Q;ObobeYAdcGcS1Y?}h(W`}akqxNi6Uxoh?BUxQAf?y>6=qSq{k_vr;x!_4{g zt>2iuy`d_Rc*mr!)ZLA^9n*lZLIks0jwAZ*bwU_6ofE@uK=1ELmuBO11Jsgb6Nb3C zi&{kGA6gBu&Y4%*kEL&39(U2f%d1b9UrXOSx#Rhi@?lOQzVyly7oJUnpC3QEYxZFK zvrjdjOv9VT9!USm4~>6(qFQ_Ug|#PgU)h8{T`kB&edAGSuKba7kT^LV!CvPfS>nhVnDvQ}+{^ijum-45DGg)WpedZ{D)a`9Gcg!PCHhyfiFlX`$; zOu~AC13zigq?iz|bc_tmNfyycy+J#|P`W)fYgHnYl?MIHn@P2W8?hL6R5yfnrgHJd z>+y;WImO2Cs(rh?V%(ju+FaYY#;EH!Buxf1tr0r5vGJ>NW_V_^jccs0zOl@k!R?-| zy3dN@DoLv*HEhXK8vou4GQP`G2MIF>= zVi&2kNj4cyrSwhQJyt|2XE(5 zyH1CikBpy?yr=X(ofx0G77coQ7Hj%_y0QvmZ1QDoh$`Kp$9mLA2$|L@T|6*Ji%hgm zNN4kUK$G}Rwbtb8aueP2ad|$#-%<&pAT9?{GopQWg_h+}8xL27F5#V4HzXWety#6; z5x%T_YNI(etUCEMIkR3k`&uVTZ@#kI`K>g0b3YY?_S*N_00Y$7yh$m^6wJ;L7i8pB&B*Rjlx_&ogQHzlA5H;rPhLx zybphe#xf;r7MZ&+(dIG9uc5i?s#RTBkcg4wsg*9LKlgp(cc!J*o?GuIrWRhK8~N-1 zc2oAK_-l&wGUi@I3SBhoCTw)po$1h)AB(J`p%;@F3Wc5f) zFE{;ibMV~v?yv)pktA46xL@BnA|qyAA-jM-98`DBKl=F#E)Q@Ewa_a^+u9%_k^wep zgMJ{(k?N}rGT_bHP7m}5clBX&aVM1ymnth99ZI47DlJych1Op^I-nP-TIiSPoOT(I zug-bpXUite{8&CvK%+w+h9x>SLY#d&;;^H&#(t>~HT$;4VMl9?6p?Z<=Wll$cC^;m z|Bl6BM{A7|9o9eJ_BiZltG{JA6$N2_>Um(B*|_4I~sj8(tX-ZeAc zetBi(WE1Bw;}TnAiYnvb;goPNY@V|Ws*^orIfJ*yZM%^c!e&$&fh^n(c)VhCgb!%?E@ zg9sXWpbcFOva3RNSLjb{(9qhfix~a1dOBiy^hjn2wtOS*R9anFTD8)u(X59ko0#XM zy@zspgRVbZjOn(j1l@=OI?Q$NKoOtfk*{7e=WPS5*;KO)s zWl_ap`pu`t_Klxhd1mQCdc(h%F!9*^+zZE6Mbe(VQJ*~Pg<|?w|LmUIC-muV`sLkw z?;e*YTj}S2bn?FGxI)aak;vVjxP#unU+retYS-xf6h<9#@yU&8C3=_fbhFxMkbIAh 
zP%qcxTD=>#y0w^_Gu2MF1!#t);G;{6D|{xt2w;^5BIYqd=Pd%39{cgJd;03FkhnBY zk4%#3GE%`MF%ZY4GH>i@QNJ?zz&#q$X=3kIo7WrZ-ORy8Xs{V)X~gg_HN` zPq_Zfwk#gokjBJAHi}<7vZ&r*_*DYX?EfR*24N9W1aFP5dhprs@W6&#x zcv=RYEgXBeL^TYI<3fsZfobK#54|Gh^w8{j;fFdZ-Ss0DKhbp;yd1nxSd~4#-dj1s zg@yHX?p4V1F4}SpnGyPiaGB?oYm~|4YnHjT!Y#aZoRD?K#g}@e1w}$OcMuXSV!%Lt0lGAxJ=%6?F*f;Kd_vK-ldbPr^NwUaG{PrwEEa<<@Al+`-bxY zCpxu<8q00Ai@WFj3(2azWhrc1 zzjQr}=@H=@RCJ&k0W7IeLpO=BT1gy&+9*9x4PLHy=@eOmOGT1I`?S13d;^=GD|=+= z#z_0PLPBlSgb*311<65xXbIwW&@vNZDU;O;3#(P)Cbw(DxYmdQ5}#X1&@RLOU}e>$ zZe9d~js}3-72KKWRB z>i^myu7c-IyxP5--u%nsEya(R4d%PdO!<+SLBkY#B?_2BEU}3+Tw( z!ezphYj}MpX*UACj*<{1vnoL{ZWdRQYFIBMbalG*8XTVlXFN2uGEBTq(iUTo;uY`V zl@2!@8}+WXlx)RZO6JOTy&K_jh~rkHMF|hsWVzmnI;5bpjsYLU!`zly<EH zvovBQcWwpfE1HE&AstQTK#V)4F~T!oRN<#m>8e@D@DeE_B#k-X7E4!7>din6Dmj{x zIr1Qr<6*)Qd??3JoLs;vz6ZsmZ~WMv1AR%4@=je_sot(p`_kR~@KXun<|9%uT*|Z! z*tsYSzP`Ks2^4nzh@<$6GY?I%XrMFpBr=L4YEUx7*clz;MSjRjlN9``BxtBb2 zU5Yw_7IhkR)1rXff?Zjpq&By%>;j!1aDhs>We!P4q*8|b>pmMdi-%kf`;^|OcL%sZ z>y4n=>BMonR-vc>7nmE^b0KuxX~rSFT)D^~cRr*jPIxf=gN4aG-pU)~ZR73aV*E&M z;EgXn9IT&CUpeVjvfK+=Sv`I+QOiy(y@?aAZan@%U>ev`H#v31zSBsyKHE)yRpQ& zM&Mx;7GP&hJf*-2?F~-aj>NV^o2`@Ex`$gF2#^|AKh(hdY`5Kl*_Jz%xK(e~nl<#g zdI+zje{R@LskMBE>-t&*f z_fKNu*$e3#e`l=pX{lLsk#W{$Urw&byR+FSv+m_tte^R4F`uxGt%-L}5a=5%mHF65;&2fFJ95k(wB|6)}D;vq`6{#S@ z&Sw`&3%YTPU}E{I`_Z3R)stnRH3Ic;R%wyB)IE|dZ?5!+)_|2hJnKnc+JX6TZ4cHE z)U)HPO{kcez8|!{!xC7D@aeoIiMG)s#!LnucggkM$|l?1^j9jrzQhA9iewZwWyLex zzLd*I0NiSF&Lk9@(HkgBSME)}`4bcS`%B641u3X2D3R^Zkb|`myD`8BH1gtjx|53> zh!q!Gu&PG$!Z&A2pVZZGkCu~OlSGLJXG;#v=)x-l6`t?Q3w(ROp2JnXw9Pvi9%sty z8APBdoVG3jHxs%`J$Pp2=sUgjBfzsmXJJ7^9x>Zs2!3n5?mC@vcmy-47p;?iQNUUT z;MNsYtJb;gYy91IW%q|{CXvVj0~aHqg@enuj9---nD1E3B6^2quLtAn-uQ1%lxDQa*0$H zCfLS#<_MdH_b|_is z+@MAg^+YCb-+!(}g_cI%;byJ3u}a~T-~Il{+m1@j%i?!t@66jFLoyl#llsNFk{L6# z=rU$Q+#K!Va>Ot0xRKHYQj)JgbZZb;vNl-baE1Dp>oWsW%qb_!GKqx{E8w%^?E^HUc4m7kR7WC zl(nNXd{n(R+lx-@K&#O2pX1W4Gzr?~3^LFc-kB4ElkTa{~Q=(s5G@ 
z3OwdY`tF6@A4$LP_T;`p*62!cRtu1nMEE-qw01wY8&?xD%TCy()VX9wuuFuql>4$8)U>0J#tf@S{RV@->8C!m7`I9muHG$Lr>eJ(gwi@%;kM6u@cubow{p!En_0jZG zpPwv!Xy&2W(i!REb4jHrhn2qm`N@y#=cflh|LW%_58WrvXP^6n)an(mL$PSkB^_wf zkA2@{kbdk-lehIVAkPcy7wavuK5!}u1fdMG7Tj`9qdlOJ;Znm}M`eultOi{+k_g6_ z0z4v@%@joaFiAWxwjf!5u`5M63_XG8GCHt^=iiLDx%*Ya-(9X0soSGjUp#a$k8) z7)-7>IeOSaxX68C2ZO)Qi3rB6v5GeLHU?Uu5Ja z$LIvq^{cj~f5pIu^|(#H`iaRnz41rJr_&#Keey<^uT_YT_!6KDF-@+&R`skZi=!;%~sXXa$C!tXih&s!s&N{6zdl!#R-mjUs_B5 zliwQuh=rF+IZDji5kF8m_Y_6ndnlYQ7EHPPIaF6HjzytWWyXF38TKe!Rg{&mQdis$ z#Clo{iV*~jb~h&FJZUBUMi9mU#&J}M+trW&O1+YZ_m*(4)SRH&ES`yaNWo;T$Kn-i zMM;@ra@+wWpwekjv$kE9XtzXU?`E8s+K@b0Z4!YYy+5FCLJdy?;0@@8K*|s~lgGq32QP{TGCTK-PVJayQ&*@Tpk{4c)2_lvf)v4-+RZpCU?6sg) zZ%e6%T3pBR*`^F!Oc03YpOfbs9kx&*6cmPydQ=Z%ZW&Z4P|0&agM|c5lIqK>kQY9y z6Hp(oRmMq6%?Ek^KP%7q;LN>legV)CH>Yz6Sq%h z>V6vDGqykdvC{54(&fv0>gm7vt?`fV%gzUBn*P#nPaa5r=5sr~kp9lOy|1Ug^izBP zi?npl#QyXv5AFH6@n@TBuO|D(OnrT71ZeN*W_`N4hv{lulpiDw&O`mH}6JDdLT z5AFMhJD&~KPo-~kxqQq+|I5Vc<@S{`uV&xxPM`nX$zL6R=+UPxJ(Io_O&&|XwlKaw z{n5X;?;niUuY7j>n8fhh`2Xc|yKhXtc7mB3kAAv)Ha+#3ozKY}yVG|T_k49?{h?>h z)L+Ye=@UK`SCRkBlRIupzy9pbTPC7Ym1oIzn}22EBkBD&;~`({ zv@1)WP9H3Imf|OC$IrIYKYndIb2gnk_54$xek@)7y&WH)c;)%}Gmk8%zi^ye6&F7J z{7UPw^mqSTlq&T=J^p-fdE;!l_m}qkJwKOR)$r5Y{DZq6oN(jwd3#Ug^N@j#p1yr@ zXEpuZKisivPgOqDY2?1P`>-Y->U{Z=H`LQN{`OdDr_7q3`XBe!2xX6+)f&ly)HLMvkYr(GAcuYj8cIxJ$292{yz7s`8c7DXhPlRxJ*?;;$j5mfSrFB^qXYq^2(}8Fgv8B2p0g6!iBx)a&$&X`7`g`^ zphV`uyi$1njMahX-^$dP&~ZOu`h1G9#Anu5w(!uVF27nlI$!JT&=P0!<;`ZRn$6e4 zB@+p+d)JsuK5LWCZkmeq3}kmCfPqc}bk+9#&eb4>?;>=?pP(*x8mjj;*6gC~tX4&* z9mZmejMG+WR z|CB+~&9^SykB>x}Gm`J!uR!aAsktx>b2q);ov4jnO1{2GVyVBb&1gj47IyEYsvNEJ(Mh9HdmV3t8(E~etc^F1^t#qs zkv1g2eTD-3R7`g5P!a5cQmAv;$56;AxbDv zT079dURro4shY2P%UKkarFWgC8hl73US~Z~-xKpvpe2 zWE2`2B+M1e+~=$1UU=FWKa9HhP;=xio{3kg1PGHE{G#5kN8pL8s26Ul;?<~ zBa7mnb>9%>ol=Hyn30DgOXWBm77Q)IVE|bmV^3nnRkl$v?_i5IYL}`@2sTWDKiOl>1EMUBud2k!yqyi~n zH-35fXoKzXcu5|U?a3c%bAJDwiULH9t_A3%>iAef`u`7W8rsPQYsTOVUPUEQc)xeAHbj0pb|a=AV9z!h}RUV-5mF zj*Q}(3Dc7* 
zA^Im+E}lpZ6HMF5E(nZ-JpSv1$O{kVbr()$M&9w6WYI?4w z=}ehIDjHwDFnoGSiGo3znyZ45J;(?7>L4&v+LY?>dZVtaU;G%Vq~&uI2UIR9_>_KO z*PdG&CPbyjy>d6xJxvTVNTz5O_9*;L-^D=Ch>*x?|Gw~BQSqsoa5;ZlP@=d7S-sbC z4U-Dv>tPhndkBcYK`ccK^i7a+F?R#n@PyX_KQZ_8#Bvx93@2P-TZ2oNqS#=p6A;V3hj18gtNTLOp_4mBIzo|3~Vj)ygBZ;3Wz z3JP{ge!*FR>Maq3yLNXf0@-3`axk!bhjk9A(D=bGZSSoLV{zVGPm*kI+pUQvLBYO6 zgB0PMAT~kou$faFKK6`!w_oBJYepR|U7(KG`b9W4Yjn1F&cjNj$EDB?=%~~p%tp>i z&(+qrw$zjUUB(0+<;*i4*as3_ zv*X<%ihl*s9WK92n0{bP8*7u7$_b0U?lKm~&(+L*Tx>)M6h#CF-&iIbCM=({5hwgYnR=Wzwsif(FGcF>S(a-!W_ay2Zd8n zwwb5!fzm@3?j%qn3fV}=Q^8`u#qfSWZ;G4{+rnugUTeu2O&db*%O$t^l4Q>ok6yUl zJ+pZ;>9J*Ue-0mhSh$gi1=se}>kg9OQ+j!s5{w1VjBJN$t{hp}s3C~<9!Wq7oh1h{ zbfx@fprdUd=_51>4}z*Bxm2?L(CX+qTB5@?Oh*dltmuhF zcSV{t3KWFp2~g2a=hBb=;p9h4?*&h-pWhz@)*x`^bOU)q^bFE-!+k$x1?QxVmL8cN zwm&$IP>x-m(8E)+QdX;{*ca|lIucm3QtqG$)EtjdF3xo%Q~S*fT1_tDtMYp`)*|-x znp_bQ4~=(K#<+ZCG3KihUvZ@*E-K;9>bQaLkhghoZC(24nI+5Hs;XmE%XqkT>3n@H z(0t&!pg9r;$#D|wAH+eAm<|U8riTxo)Ch!2;Zuer>XLa2REHgjbfdaut2@DH9^<0G zH%5)Ba#S5}unFrgj%x4xy6RHP{UKs+&ZuxGLbiw{}hH1At4a)-WB0cF^T+CxWq^aH5F3HL`cLCa%*1or>>b< zRhva-rNni75FK*r_5HU+c`LX+3%QS5GbgYDR#e}F7{VoU<}^rZP74t&Hv?Fy+7#I< ziu^^forT&)JtrU)T9$|L#b~bC;y7t7LWZ8_sl%BofeRL!PcyD{phqa=BByjI|xHf@;<+O3r^*sV_Qgr z3%hRVW08pkn}@J6-6% z1yqFn#1x^Kjs;9sD6T9wAm;dyxJ^k_=*w6p+)BYboP^5$ZCVy(H zC-Skjd%~V_Pq<7arpbrT7V;uIWBtlXa#ZjAR4cgve?r2L*N-ci_BVasR_L-Nwq);a zg)d&gkwe>>PhAt6QVq=Xs*jamSAuj+QLHxURf;`luN!-^%}sBKKhXJB_#;Oc%nhHt zKiK@|i(4P;8C5Hs5vz9e_H5u8l;c4@pzjJ7z;rfgCb*&mZiJHg8SVDRR!qT+_(4`+ zNBf>K=z7n#aJ08Fko6fc3IL6hTt2qMf1hzj?J<%r>{EoGov*9~& zrjO`-B*no9L8g|qt(hXB^6|W(=Hb#}r#o*~1B$nMCEUN59ZDfnbh@3*akrrbKmwFi1S?Dv@O@`&(LSc#G*DLii6~f~A zvcL{yXrT`g*0N5V2&>Y{9V=Xd|ApOXR6w=7YeN5g4Qr*B5Sf&*L;#iNn))|vtUOn{ zdfT$V2Me^a6{!rFNCRThq0}HX%%Nb;=KwVe6u)0~)N4lzzYo5f8(W{mHdbpt{ZrqV z*dx~IAipe?WY-1;`@Nx`v`U*fXIa0orwnBFd#$u34?WX!ssGj&s(6GTLBE9BYHAl6 zxpqsIymB81(a?`-X(St&cAvom*NO!J#YrL0kU66nKiP<*Fhg530|Tcg^#{EP1#pT& zRi10-u=%#2$ex@AR8;;6z>(Iq@AUe{BG*dy 
zV_@*9F<#F(jRW7CT}wQC_=MyG!7t6{<%P=`NoH`C3T*C@rhTbH=3?)%@H-3_5IT#) znQSMVOMZgjH*c%0F;|-TY;Qw&rG&r2r8Uha$n(UE6)!mdgI+pYFdx0}+%>tXyBKqu z1hFs;rFjaI0nLy=&gg7zt#kK2`(8Hgd+Qh84~8oaf>Q678nzD(Y7ZH0U-7ntZexA$ z>fF{QEe0l}ZJxi9tSUwYPMD-Lo6sNnP z&Il5c>k!|pE-UqmT*kM!+*`Mtv?ByhN*C|~WE#J4HeBhNLuXt~RHCBef^X69EF54@ z;}2DGb{d~BC1I=>Bk&DLH^B+6epKNNAe%s6K`Oc-Jn3?8^<@*wmuSvt{+VzIUrdMK z78%KcpS-FFZ%CM9tBjRcVu-a~mQdEJrXXi2tR%zoI^5f38!h+~?wPN4#@$nsOC;L6 zp2CfB)2E^`_x~T^jzg$-ni^ z*S8T$d`|v)ioMc7J(wjdvehUE{FiA>rt&)kng3nyDp)JkO z)$0+Hz^+%>>&juZp2BysWX<)I*&@G4wr9-27Jt7Lf)JHJQsPBNkw*qLm_O6&bm$bW znGhuRD_^J1Ly_fJ$so}Yq;{YSud1`uH|Z8~EaYR_GOoSeSD*S7LvL+g?@LJcAtA*^ z+<@4K1XaRDn`VJvS-Td1PpyWO9i5UK{O>l&K_VaN@T!G=u;)R!o-7<2&qsbQ2F;v|(k>I=q{*r~{QBiM?KJIei72Q@t zxQJ(Nu8=QcqiQu$&Gg|3L0!#rQxc&K2nJ#n(s6pSNIr{6Bk;n`=cG>`=?P?{#Bs7h z9s(1{eHj_cJm?}#l_W7d)kat^^$lhcu(mjPCVaEPHtQFAcIVk%wNwr_wi`~?>ijJ0 zqZbREB6@ATs-lXnN3{NKO|)CUVCU*~xHhNoTbnykj63R-ydzCqL?==;Q@tp`W^oU7SCd>1w)?nR7(5Q25c6s95rngH<*1yd zOR{8%Wmy?cH2KTiADd62@TESs`3Zef9|L8|bNx*YI5&7cQY{bE7G!%)+IA;l3oazw z)Ht;B?79Rsu@3OHF82imY@ZStRRzVKS7X32J)e=~dxw@wpFCrS-^SC zklmaP%pJQe^MUg@h7wT4yK`Mxyx5Bi`JpagI5wmp7cx{XT!{<~eeQ0Sy2}CLz>o|* zzbo+7r{8KeFes}oFy)i@*u|h7NZpl`e2}E@KKOzenB|~KC|?BGz;Ih<^18vY80!a+ z%Bx6eiZ%EsHw%Q1k%gelqqy=MhUX=ZP=GiBhN7AbC>jT!?!w$B@1+@1pA*O5r`ZND{;l?q6jRZ*fxL z9?a+xYX*~eNgtLh-m6aHeNh=+3nXF)pofSmJ@t0okrBGpmBF2JOFVc z?qa1S?X03l0}C(;n$3_x8r1u!cFL`w6~xr-sAJqwah|Uz-zg@bmJFeaM^Xu^^;R7} zL`35#S{hXn3K*2BqS0!!8_lFV)@pRpV?Vt2!;=~QtM?}T@|Sk*%8SQ~v*n#neQokD z-7vm*y!_-71v{{I`sCSH(l=+uHZ^cM`*id8Y2U!9*}3>wS&cw#;Pia7{P>bMXPix_ z&pvA9BJ|N%%1C@9{hPmz%S9i?3N9Ln+z-YAZIyGGnC@)Hn z&IZNtgG!TP`%S8TMAdrG#o(ok18uLOI8NUkE2JcUlfShjMyW~#a6Au< zg!ZxBawn!pf854>)d|>B6jg2DFNRYmch0n_`68}}Y5=A;!zxWxyUj4{*6=wdov7Xp zlb}-Vw9~iW+OhvGO3Knu1m8tNy3jV;ZMt&RI`mThjvU?AO}1e6r-)x z@UuY>_|lqP+Plz=mXfwXmEzsCh8}zg=UfXnL0qT)IrG=Ml~$8-G4ve3Yk=R*1|GjS z_4bY#90xUe-QqpzMrB+qT?%(M0B*D0rO!*8bg4*NtJ9U>t>4(U|E?BI(&|YY62#L| zp{E)yz%ET>I3kMfbZy|Ddui$~-87w0%QgyG94H8e&}cV`K^Qu)w3=lau(hL*wh@u8 
z_^x)VBZWv}>gSegwMMI2gN)0x-KB5|NR-}qaO&_oPkqnSCw5HJuclrWw4j_xwN++J zP!Xt6c89KU<)96ts8^fmn~zNGFU4`0{$*j3RH@<947*Ixty3zegRg}`{`}Nm-Z4#s;E=Arv$ z2p2CE^wkCFXuQ(`a&1ZsbsDXjf#c}wV8$VuKCoE2Iy9xVU6-;wkYzWibt<^3102m! z1QDmFUYa^ks?cDk*$xth0TV+;4q8-UuCW4!jw-h5KmP+$KQx`R#8XyHq8dT1T2!O| z9fi`9h&o1;q;JR6MQYH=IDPuc)NLP28bYSjMS>#a+{%qkD~aMnjxVa)YH$9tQ(xOT zU2OteD%}U1RwbqtN4XJ|=|9m7C|C@W4%<P+fW~W9^;_scBorcb!o}^6ec|=ULL4D?C1M+Hh z@$W#cA$){(0CsYK91j|-G+-#|Kv%1*lVtLUI!tZq{->utGIif{4Idm`2E(XQZ7~94 zbm#~GHC1R|(uwNXdE#_RJ98?%L=$YJ2IvhnMsC5EL=uko%45GVIdc#Xo_Z?~h z&IZ^~kArFugrIo0Q*Tp4Dxk7`#K~17DA14>=Vq(i0&SXrqZ?N!$xoA|IHHbzD-O%u z^p#hqs)wX16}+KM;3ABuOGSqiczjK8t17jvsJUNnS9IHVH7PzVg?y;FPyIR0G_HWJ z@&;wGsY?CU53$6M`dM|LiIizKqZrQAX+}^dE3DvnWHO*!PH*_-JqJoHP7B5>Zd;0L z*V?!&TlAX}d4S^|*Xyyj_uV&LriUX0&6x%4p(Y}{74jmyi~`xMa=S^Opjp#h-_?j{ z&_uhcPP-G+v%4t`8)@%Ytyf#HoL2huM>y;xf@4uwt5TzDOhSz!FuMf-0xLQ`1WCO^ zW0gv1CzF5|U}&Sv( zG@7**%0zneqmu_9#5#q+sE|(GE*iAdoAAyW)x@dW4M%8)xrlpe6@TCbsgsu8@TsXg zr|EpdVKs1BgGSXTg1PZKa;?HqS7HPQ)x1IoOSvu0FKIPZtunPVv}gqp;H5hk_*V;2 zYJO+m{UB)qF*dukYMpB3Eg+H5%#uhK<;-Au6)HdRwL?;RdzGtg7#3VALg>)EyH0;A zD4p(`)FDg1>l0IzX?DU)!lx^=_pU(5%^GpEE-V==51?>#h@fS{21j)*;p&8(L%Gt5 z0@$a7%__~LnZTXX@40g-n1%-E-`S8J<88oJPO6QH2we^zSp~TY_dOhAk929Vu66!Sbpm!PZCb&0kE5PCO z)F13S@Ufb76^+4jCJ_!4+zwnTu2j$m zKzP~%Au?&!iR$w8R}dr=%xhCD4#+nvwQh(?7PVn@U_=*1yB-NoIy_CcxRx*=bSm`g zDy>gK3fPw^GY3Y~nj@&w-J|Qs6L=xBpl$>-k=8QA!T~1m67fKTaLKZ_JaV?}{szZ?fmq{`B90CfR*2YaAhY5L{gAhh}C zf~k+pROsx21nss_ZD3P0afN}xQk!T{Xr)ONPC_UIM)t>`5p0!iO9Ax(I|0k&G`lEY zsB_3J=py}V-!paZbeZQX(wC{-j*z$@c!hF+!iH&Z4{*b{Q=@L5ab)@fRKP2ke-$Go z5ncjlQNHTP={AxsTk3QTN`TVncqRH0eFs=9GNegBw1ZrdI)-evYvZEEf*jQ*c-bHz z9fU{@#65IG6I|%iC>?SVQAEELBz6nK2=3RdHz?*tCxSX1?I=9hieekG43MLQ!6I!z zK=TwdFjyXB3Zb;iMwV^R5sX2%iBPM-KaiwRLt}11)vZoIgIyHt#N>P(45OGFoC_To zIgJ*D$ZaBy+jT6kSegAH8tgRTl^uZJ6;*-bCe%wmuTHmuy4#f=3++bwm7m>rpwtzF z1L#$PSjd66nuN$B^f{OaJ^E0sVMm7`90)I_t|5A3yMs8Wqm9RKbU=<`O#M2P_Vo4R zs5q@A?RP-$s7WUt7-NEZ8=xtR=2NYtU!9nmn{HEPG7bqXN;_Nhcn*U;xNwtZLrdX- 
z)U~kEF|z{fYO{7W+G=A4)nm?-zCsvKa0P+3m=z#=Kt~BMq6r~L0lfwi5LSog3~K`i z+Vsh((N~Z_UJ3mcf!;<*MU9PV4uQe|ylF#3JL7r-vqO{*7-C0sJW344Iy(bVs zaR*ujgfuxu*=ecGRiav7NF_>Y`^Csf`gTAL?3d0JU|XAbyb$^!>>@Y}DBg+TEv+`{ zen9O=D3r5mAd}G#P`1C`pE^)$#I*ORBdx&pHcD3&yr^R#&;h^st4KeK49*(f&h*YApMexD6K-U?ErCt$i)t6jRmGn4l3E-4 z6WfQLZ**cdjkZ?9Y=)MwnJ`~E^t7h`NQYh}EukPJM~70UjQG`miY-Zp5)?IP94irH zC4hU@QFF?W6OWZuZ8}7+5wtaQKCI1dm8(`!n+_%cwh49wCsw1yQY}nhKegkwyFf3| zdazAIq*BSWL2GV^B}Q4OCT`Oms*a*dbO}?Vb>a)4_oomwjj%9Qsx?}7BfYB*Nm2M3 z{Hua#jZDD`!D^}?yvkIe>;|#neT33O)k!2deP?2@7|em^W2|>sbe&D(5Q5c8hDj-) zWhWRZ#x*0+rUe5++)5ZsSjJdjqJPdnoo%EfvK>`{uR0;TmKLCG%&`_`wLlbtMC%f< zfpmodz!$MIB_W9Z*KIm{RngdK)7P|BHiBA$P-%07)Xs$Ez`76v2)zjO2@^no@fGT6 zm#EZ=AtL2yIb)c-9-m+Y5(#PFFa#oV2Fn%3AqqIMFaSa^07vsgkzsV$PS_3&%~4KK zY2XM+6V(8iBuI53F50hFE1kerSwqL7O>2k&PG2(GoJH6QO|&pfdxQpT6JY`df#wJ9 zs&vpDrH`vTP^K$5GuOq^!*NH;R2wm_qhA6bSO7i5yBJJ>7Gfx4%OKvx7g2*l+alEz zs*!{RY+=geZ-DnzfdN?!*r0CTjV`uXtAgK;v%$ioOFOIp{)SY6dQp;*60r^0qWo

Zq&hER1Y>C(MYLho;tuG-$B?xx)Y%q=ggYQ9jxqZGqR^pp1XZVR z;;iUE;uhvDY7fQ`{tGxQ5HcLU%5KoAw%>(c1esixqS@GbuxAzwb)l}31qepm@PBeN zoBBnMZWnF%h?x)0R6*Vj8C*1_#y3fKEyxEwhfa*ffp9)vP4iht+ifTouMh4mifkiV z@Hlm72Z{FywFd1?X#0@%r+v@%wCSnF)PcLmy@Hfb>0*Q!#wVr@<_*FDRE0j%*MFUD zUuFMV+cu6Ox{IN=q2M8tkt7gUGY&-P1o)D5+lRJoZJBkpOkbH1vkxy#4f7iV5f?Y^ zrWhZfuog4}sMDSHRrbELMb@zj5xdBH{Am@os!SgS%UV$>9F|{YdrbAK-z7X!{Kj}G zIBwVlmIKM84dM%x3a-%LG^7ZFhoauIE2WWQg4CpgF1mkELz!y95isQ1Sc$mdP`RiI4ce?Fge6_a@cp@hhM{AyHI>%^teCzIje^GXzo>oeU)t*A$!su@AZ9 z2?;O zQ8QOt@IO8E@Ae(2&=#Nto7T7s?h;r55(~Epo(C=h=o_&ox3yt|IIAp_DmPKE;bKV zZSEm^INTiEV?e3&mg1a$@88X&u=Qu2;M~{(KefQQ-59(-%?f0yCQUG77jUWjbNo0U z3%j!sD$WG~V-4)ODFGO*4}viSBL;)N0m@7PA;@MZICi#um;j&xPXYLJ%QGXSwPx%B zs}UeAl1>-`=m$mXWom*%3_B>WeT*;>J@g3Pt-$?Z{KW|gE`i=-D}OA0V8gRr@1jO4 ze}!f6=jV76jyLe~H2M-l0+)rGe$>+2{~MzJ_Crg(H0It!2Sc!ccT@uaOPE$lmdr>7 z;(E^R3i#*i_x?SS#{h=8RF{djaDpj|YKTmMlmHmKkWNBN{7$!I$UH1AmVwkw$ee>i zM4e*`QKU2J8~EN`C(RS~^H6Qy1at-rj9uKZq)-JyOTbHfBa$UVz~Pw)LK&_%kWAD` pUzX!hxzefDC*$}^xzFgLlwMw|ic;!*l{}sNX7{UFH~Qj2rH&<&MGpV~ delta 91458 zcmb5X37lkAc{iMDsEaZq!zd_3m}zEUWO@eX?hCS2S65Y6b=TgtD#%@XsjjWNs<*sa z@_o_d(^rMuH{3*xk$l;W3PDAKZ0^PdG$w9Q(8L%OL^K*TBJcm4bGv6mz!~`QqFl@el;Arq*D{|TBprT2Rr#qhDh=wNMAJLUX zPxc(wP%)6Xnuie&3Zm>df-EVrX}PK`ORjAzw&OUu#`6kqI=tXol0as6P?U7h z)?C?AB$u}=)sjp@vOGt0Emd=5TXi)@lSi^c8fzBDkxWz7Wyvut*->R);w8;cpbJ}a z1W8mK(V~;k$>6r6Y9epxuBcl&Z(4%j%9;c{DxRR38n0La>DSP7UNa5LmPCseP0`aN zjh7t(Ei}h8MQP+%g2c-*Y0RSsO%@fwH5@FCw;U4+&~$Vyd8%t;DY_?$Dru|QmS>=E zi#J8t)D&Hp9a%IL&(m>f^CFKWm^xjYVyU9w@VcnVw&&O`uj;C*8>%7k+F6d}D1sns zjwRXHlPkV9cG(>t9osuLAz~S#r>LeW>9QpXn(Ru3?MSk!ORg@-hG2-QE?9V|_+Mj} z`IkjsbE&QIj$uIAx@EhnfPFL!#gcUFtt{Fq@7R)LIfmxHEBc<1CToVSJC?4PIyP9x z7KpAcikfEW*d1LG1sfeuJ$lfyYly@Y1lO~%>sTe(GEsIdUQu~bR$<*5QH{dGOjKEv zEy)l~SUBv!l6Y6K49QenEZ8&^MH*q|2M(GlZz_T+T7sy{o`{|;Y@H>_vW_8iMH4hz zbySnI)m=@5J;2LxLuM$3B|t^|YEY(=sKMIvo&QN(9kGBw@s zJZv)@Rf01pf};t%qFaJuo9K$Pg|FKt{N5D}Nr9nRrh{!aJzH{JVLKKovsPkITfE>OOatAyuf=dti!~P8?IoQ 
zvMp1s!8sfruI#CX;n|+a%Ytg6yAfKtvS~|(thk<`(Z&XENfPW*bsUApCkOVe*^bA< z(6J8D6?j|LT`~znQ{l}zJVVe_$L2K;h6$_3>=jklY?CZO66top8eDiDd{E;B)kELr zNK&!ehAG*+DI1dRDITH+=@_nN8@wY6*bh~6RnvB1QHrPD7b0rti z$VRj`sKHydARCrunIlXQ%WzEvUd@Z1DI*Ygx~xMPA}oezf_Du8vydE35hYD9JUE(b z7!n%sqJc%i!w|G-$1;pp(_BG;rx_#?%kU6ORs~B|;9;%||9~%2`!RUk9f_(f!LD^4 z!3nmkiH2$Dl4Yu}H^D_*hnbT}h{#8>U}%_>2_N=kM0ISIfM6hKnu3{F60v*I*0G>e z#6U$96!MS<2em}k5fJcH5)M?A#~O)-M+aem24c9Y$nZ#>M>uvp1Zczx4O2D^%@Qpe z*^YGVLF1;VBMBfgh?*+ej!mMKjW{K!FmOTE9a&=TjF}4()(eXiRZYfC#c_~NO%+iL z@la9>T^UK#*d)P_G-M6kk`2$nEOkZJ;Al4Vtik{)R<#~Z0Ck|oXL2UU0tEI@L2 zII!y>ZVQMk7R*=UJ;6gNbzKBJ7BP{1KGny27vBM^HQ$v74z$IWwLv%BtPYMLkfsQ;R z3dm&E2%dqND<(h-CZLOkO0upjJ1#XKtk;EAxriN>tpPFtQ~;n@n47BrmjOPQ9`Dky zZK`lr7_o;q42UI&wjqj0iOAD3&=Mk;%#jW_bLItGLuL_mQ}t{`wG<88 zPEsVCov2L^k)A97Hxn9GOc|IBsm~@D3F`oovk@|Eo&r7>nv^X|(|}M!O@b`~IYFZu z7NH`odWNk^VR}ZS8hN_sz-1Iohvi!cIhKLmMdSt`V1YdEsC1uI&5#71#}WmA1zAMQ zQIW?9;((PB1fcQ6w#bA{L^j|I7&Sr~(VeH0j4B~?nk1CMBLxdfJ#ch{@M^$7(3yaw zExU-Yi1{{8@RNjqAZmguAw(P0CZK*KBF)1R1uO<0j3gzgDm-7~T|g8dB|~(nO~8jm z*M@~4eZg;JTY#Q4tky<;0#24}_|nK!={_qw^(BI<5Y^E=patv=XkSGHu~ifO6TnAS zXGzG#5;ltm8uj3Cl3_`*Mr;b*AU{bmV7bGLm;gN;NkW2q$)1fwgLw$p97nf2OBs1+ zssl@rju99UfsqMp55U;64Ok)ELl%L_Nk9>?Z*VS_loS^Q34EPbdBd|@(UAnlv`rq6 z0XWFyc^6S#qi&~aXy?F)5j2T6A$1F;jLsZugfOS*rbE)ONK>wWRH%El;lP%WS}Z68 zR_=+4WMXU!Tc^X>6{DJx_Uj^~0)J^x50U|_ z)euoVj0kYyu__UW2~O_V)JFlpOwksgIrs=_9RzehD--sE49P?3yeEl8s76ir2Da1kvLacvr2CDYL~2{93<2`lG;pk?5FRRU^oU6eLZ zvgF9rMHQF{lC6%lm?M!igHo|qD4&2N4MY>z2|xkcXQ&+oA&{`m+pY$~1hC}=9Vi#3 zgS7*&dngfz{m6=h)M-hGK?INKDryBqgX_ci6j|Y6BM82%h?G#(AiIy?1lWEgLsQdG zRoQ?d$S69B5d~QgRUYZs17=1^pt_c*VNX59QJ@$bXhwk^1Po*ez?*bCkVS19wHHbx z0V?(!><1FCi5%?$+n^SOHDU+i2t3$v&jK9b zg##m^HXsNQu}?v4gtiS^*BsP_a4z6wWNj3VJWwm!X9WOLMZKT{D+ACV8;d|w(4K+v zo!AKSwq;Vc<0VrEn1<=ef(vwqEl@PX6jJmG2<4({@*~q^U=YO~YFW(B2AadfQEUR@ z!}btAkeD58sfH?)dI!>^tavh#Ir5xG(6WIDf<3l1#A_g4OhX3Dq&A@f+yWUpfNRL% zs0ai&w`d}kqYrUpI6P7z!Yyemq7HFL>4YEzK)@rk0SKT8aZndY!9f&&lBi7prvhmM 
zF#+eI*g*o4QHmf#0AK2e3YZG8C3QRCJ_8+qPjQh?!9rjQZHx~5rI|o6h@~RznngjE zSK!{R2%iM@br4u(v=v;8=K&p)3Kc<)g>B$q4|xl{!Xo~8jGA?$mk{u7btPzV7> z3IGsrN5}G@Q!)t!A;Ch432^LT*60tR1Th7n-|$eZ0bJl|le#EK73?B<1IhV~FKkQ){N8thGk zu>qe0q`<;q*vO|?81e%)hs6nCC{zuAy@>5_3t$gyqs#*jYce(ur3>I>7_0~|wUr#` zB}7~hBPgK2X@F_~lD1G|s>lqM1%88QLIUdoWC_=>kj=?9BFL%0Ouzt$0tPS+aTU{7 zU&N0r1zE%+TK(IoDblpbugl!W|pu~e8Lvt{1GIa_4 zVt9-MI#9s=*&~oa$kC*+fQLW~p&$n^2APP-B1#}p!>zITF7^?8f>Ktkl00g$B$r3`V0!vb0pCU8?iv;!>!Q@kL0w~ZO z0}mLYXG6Kjj-aB*mf=4vn@$JZf(ADRJpS=bUM(9>RhXN<~ zkOm?Nk%gd8Qv1q)pJek8kyONFBu~Py&<#gb4Xy^YAhHetfxuQksxW!vYOG8cxf+RF zG&KNIlByKi;(>EOK%=GsDnh4V9aYpEF0!k_GBuzt!U_mY!vQIV^oKCzAdV|Y$tb;G z2grjc#>pgbLlZ$vAm#wRBJZJe5+NhR$p~m1^(x9$Bw*4Qs1J3miFhf=0IZ+~h+(7J zR^aW3^C*o_8B26Kz=wFy3?4^r0sSaD2!W^=0B8xe1o#931?Zuc1i}H7FR&B18H6Ql z9{`>+f(Yz0bx#(yFL zfgXg9!k0C?2_Ww%0G3d>g4*^lC6aG|pGn7(M~W%{FjO~yv8e0>@K+9O1b9pWlt3Io z=%auP?<^c$Fk!5K9VFkvV8Mtv*m&e3`X)n#e=^VJu|FolrU{3o{1YlO6EzM34Inu* z3X=f8&9)BF5-EvAx0DhMG1;` z&B0=A5Mb~-zy|vJ=0t$ssyE52+QgT5xgiHpWMhXP0g))WuDeMs>3Zi7AgR&yH9d9ncDhnR60BYh9whgp1VOv~~y6i=TB?E83i&0tVh=s@_ zHfDu=5eOw}A)}$j0&XCy1OJPJ1)2vu0%8y{2B;$2#kw#5`PJj|{pfY)f2ZEe&m4<2& z!dN5BGG6h4L%{OM`yi0sNMQn~8!Sbo0RSi*$sP+N3D+GV27p4uOA`XJMM4bV6#+^O zgBm76pXA{607z{EX^+t1z(L>(F)|z!L>WOyIt9KuDlXs|(>6exA_|&#&jhH47=t{F z5(j;P>~N_~U@PG{mTLv)&h0sH$3@rv#c%sRY~B`p{-@79a>vhJ_T$rn$8SCJ$Q_9r zK6zSj<%jnixdXv15}fskJxBb<-Pibk=KX8%_-7#g=lR3YU~=E?BYvg*n&7T)>^Txl zesj-J|GLg=f*<8~AGyQm;<X*5B3%KmrXg(h2rSR?BOdmlXGxBcEEal!xSYbUs5v9DvaALFvI)m6-z|BDZ! 
z#d6#=DqQ4IB&0`K{{p(@wSZ1SKbp5N}jR)&x zeIZ`;|N8eQII}J{trh=WpFnHb%`MHX`)|MJ@Oj6UGx|oML|(YJ-L~bf9Ju1Zv4vxj zf9vbc=ThK92DAQUuRFp?{Aw<->0gh_O2>4EE&olp%&AI4p7rm0-2}HjZFiTqeDm|@ zKb9c7PF$PqX=GQw@cIdEl2;l7+kZ7KJ6>zOwd&vY`Xih(NY-@4-}L|*d%aH6 zSo3cn*E36Vg-p_aFS%|HmlkRn|I7z*-K#YFS=oQZgIM3v%4R+`@4uJG+ns98UGdNS z9OQ%IhHfWP~FlQ&Fovx|*bzTofrCy0~P z_-Z-t-$1VG{8Yzt{nx(n2$!}KUe@$?eF43s*W{%b_TY^Z+{RXm7n=UAe}*_$PY!29 z-z3+)b$)T+`n&%H*HtquiOc>w-Za6Lw$h0e)&D9k)y!(uF8I5@h!*meB`x&*D{q?M zJZD`m<^5$`mhF`R-}K*#%hc-7T$%LEFQIWli$ zMvLx3o0pgTH$q%YTa#M_{~lZ>TsNKJee)}5F_2a!x9a}>KS0-OT4}B2=W)p=JA;15 zAN|1*Zeh8ouXX&}a9OCX6iY4t8@TK?Q**Vp|ML%_3!{_HWHkT%#DWv89ItQqv-jaT zwXP2Aj{jF5!SzhSwri9A6XZJCi_OLR{u%%02-nWfHoYbP^52DcE7gzH+x{ZN*d zFgNH-&idoGVCBi_*wnOt<1I(HH8(ZAmGK|F1J2>OUETI$?>TbDF;U2^N-_VB-h-a= zLUkz5`P2V^U60Q+>h+BO7XeJYW;EBz{`X0fY}{?m&-wZv9pN@}nUY-a|Ae$w)TOCa z-v8r2n&1}aW>eE$KXVUSE1T<6V#)tGS}Q$6tIGb7KgPP!OWDeb|8iVvv!(pXhF>85 zz1o@DkQM(Ph?iEg&7$K!hRe;l)#>@TKmO(ku5G2&<)mMDGq$5RwIsCt8*!Oe=F`RM zAAB4=%q~gEP0z2A>+0O{X0GnP`;)kiO^Gc<_y6iGM>yFS&e>D`mvGtPm$LP;|CL8c z3nw!tH~cH!dIT=Kx#5a_3zy5~SP`-6t+*8P3(nlE|F*lZSfN}~b94R^5U0z9jjrHd z{wJ8IGAStY{wr{)nYpw$>)(XSx~gQ0W&eI$T6HJ8G~@sLe&SQT()4h_zvecKEyohw zq2#~!Z3s_^Mo}>Rm;EEe*5;tUw(h^}?MFBdk!rQwjwYca7tg%pt|CZF6)8H1`HyB*iL1yh(D`p^4w zcty;f94`5fkr@>1fdfzIK%8;9EiL8$DK2|PavARXATHyrT+$i(k-xz9wXL@8`ft7S z@cGB)+*(o@J~aw2%#kpBp4`$J!@*N;O0zUZZ_e`RUcuk)-njT2$*|Vo~sC|LO>5G~ydmmj6mzHsbYWYvAAiFj{0vo25a= ze+c41I=73)?IJQ~Klofor$5R#|iW zpOGfB?Nv4APy8*SnwOaGZ}@lPQn1=WdlGU z&#iXsm@o$%aM-`^?+$UxGq&01fd%qhb+e&1dw>FYF4<0RR$vwH%yYK8zPZssI%MtB z-EP7hA|+1HG2*eU?pR09DM4ZF)&ZICbDJyW`3Hs5?nbV~#{HwsJ`m&Mj!jIUU%Annm zO7MaLm*Y3(r3F}WffEMBc6|syfvMbnh)XWbi?Jm{UiLBHnAN9;h`t4`JFjP?ID#*` zV;cS5iiH4-IpaBgsyFSr2*T`Rdo6F269~kt@mfirNfr@-^PD`C61wBRAYcuLxqK@% ziy&N#=2jQi(=t z0s=p4B*Fqb-JiY-npTYVOdf$W&*cVYDN#q@V;z?iW2>A-lw;52`tfeRi%7@r>Q+M& zVu)Dm?$lte*IP%_D!^B=$=oD@))7Db!;8;5w$fPMYyiPM70X@pwHNy{KRj^WvB_>) z*RoG-u;7n>?E=!^{9~=M;)uzoHbAJk{qC!|`HYbQ3j6EdKkVUh})E;)k+B%n3!=|v4(&c?2en# 
z3-I3A`wHBQ&|O`w0|>A?#&S*FsQN#}9nEW}O%v&b-LZ#@%3=vAhJDNp)y=YlG?V8X zxw4_oAZ)|di>qy6#s7;B<+*lwb0x0$5>CBOT7{;NNJh?_Cmm7)i1ewcn-nyfgn25?S+o1g0#RUI&=z)hDH zizyc5l)?C#RQH4_D7cJ^_3OKruRKlpfo zlhrwWWedr@z==cIStSXWwUL*+T6%~i%s!Sgnf~MeiFm^2KXr)nl3UrS6d*tJA+`I- z8DJ20M{Bp6Eekk=eO#>8-S#RFLxC$*; ztx^rSHP5MLsxq8Gt}Jj%we^kI0;-h5!Bv+>E&;SC#uu_vz;+X%pg*V8<2C=fPZv&i zy8V`a`=?7>u~y2h_7IaNf>+U}*6N)?VF8gDt6iDvuC{1AefV>SxRx~6C{!=|n=*Ma`Ua2{#Bl6LHV`*#6f5ig@F14bR+i?^Td9EPXtGxkgh&)$b8>&_o zB?OkTIyY$yQ9d;qf9kWNEEoj zVscJv`Q{h$TvJ%D&Fd&B4*L&%0WruP$b|vQ3N#uxo!NO*6$Ng%KD$xrBY{i=S63nz z17sE#@J;*Jro-S&Q{A>au&4)yR)%6 zvjj_64nHoX7kZ16{_Zd3xdpXSD7mOP@XYkodT$0L2YW_KOlEpE$`1B19!pnOpo}jg zMBA-?&ws>T&lX&?4^LvC;T^k4U0_{ih_hPm3@rQCaSBO zKtwRf!e&h=0uU9re2U+ai@-w&o13~=G607V1{}WGZ2=7xIIGc@c>bU^QL?0>GtwdbAij>@q@=sd#?YjUtE3~ zzUbt?@QFo^@hE}t_}B}cbMydXQi5Cak%Rshf4PTaY)Wu@1E2RixhF~~l^~MD$K;b| zM=7fk+;}xUzUs-dxf&%_*wc4DxhG^-g3lFk|1o?H85VOaB19>>62ux2kz?daK*f|? z352Q0!Jrq3?1@r>g$yW0DZxT7{m~P8l=3R{p^#W%*Njy^c~oyf3&y4dLJVu6 zYzk477}6<3#XL5EF)HMN1HrXgg8Zdq(h#f#*j9&>J zFJK7Duh6{-_-tg)8AlILmW3|!mg^#WI7YP4y}d<^oDp1eUF10&V_e9ViBZb9U>OI4 ztLhktkuGe~jCI+bG~-=Pm^33_j!!zIV1gSlF^*9%tRKd~(0(vM%EAQSI1MAxwKFD$ zwPZ}p_K1v(IblRb$(%4E<7L>0jF@36V$2LxQAo{@{tpJ%*RVv!&jiJN5g{aKepNkD zO4Tq+>0XD;pma@e^TBN$G0uie<6!VGQ8Ht0$TSWH@2V56Q~D;j=pZz~_#3*uC?#-$ z8_BjlJcFJoi$gwN9NadC=NOg4biueBcFnk)ZM`ryXIr0?&pF=5>4eh>ikF~EG6ctH zo#RT3QgVl^CLEIyJZv1s@SMJ!n5M{HMk(pT9GjSN z$o#PVVbl-XAIAN#F&X*8%$bru!QQJeH?mcTL|}jdlh`RGfCBRx%!HZ^BY{{Kj0Jki zE>b#(JwWHeIH41ahY>@p2gVH1>nLT0$RtAFWAqTaNBJSPV5(h45^Zk`qlu2IFC>eO zj~S(u5z{B}>nP=oh@VR#Z^Uet5l3Xm&}JEZ#B7$)M?0hJjZzBf_#h#X#5B#Ar0oeZ zE@|6xD4oPS5u1B<$SG~NWwa987fLHJT|TKqp2MODBbZnep$rp=Ll*~E69-^46We&k zGqLF~qKS1%i6)w6=#EAy*~E;8#4^N5#yPQG80U1tDjD-cRT8Dt6APEe=f_AWI=|Bi z3B{a(M7b#Cq1e#(k#I}7sNnV#)cvNH6Q`kk6dRcFQM8+okYbad;gYdZL=c7oMok^x zxhUnQjz?&$im_BoAB?5i-VjPuu|V?>0uRT?Dl&~Qt}x1q#TCX`v624QL9}Mf6%&kK z2?67;nBeS*$R7XECwd&Cqli%C7#+2};f#=Ci=!^aSgGSGk5X#txb{0e 
z7?HKTHpXS0ur@|$ov=1aYXy&A4NqjO7TX}kYEg|xDXm4kKniKC6IR2xEw&oQZLynV zXQGthB4O-cuw$O56RyiQfBAyo+Vdg@qLl3-vz6|=X(Dnd7fec#*MzJW>uHw^zhc}M zo%T7hcJ#pJfh+J=X0LqZD~}$aTp07wP12N+VQhuBK@g>s7&G#3%f!ejFUA_|S1?V= zjxn9ql*ryFWyjdyH!0|V@?&g^KaTbxNw&RfdpO3DkqtX|uEZvXZwt}Mg^U>h=sCe* zH8LI2Wn^$6J6Hur`S+nEeI!dXt9p64HxGWdh>v!LK zad1SBoD;HXblfPV(oV>Pj7y^?1DrzXG-kkbV8*8%A2?*xSdd_J8Z#NjrLjQH$TX%y zYW0jxV^M335Nd3M9kU3jHD>jUTVq4gxP`Cy#qk7jGG_IZXFI{_8PmoB1f$v5*fc;e zvW*F7fM9eR6Cgl5FF4~B(AA6Ie{t}ZE1@e!!BJg>99;0Y6^Z%#?!Gv<|0*N_jACX0nW@S%%?|4d*; zlz?MCM`y%XI69*!rQukw6!0)EZf8!+y$BC?C^$P6xgogr%*aK-AQe%A2V)px^s$SB z7fr)d7&*t(M47qZ?Rmse#>{Omn2~ckouADydhUb;GlGs83uEV)u`p_mt(Ng}JF7iA zO6fTm;3dKMp~&;1l%jhom|;X6b34Y;u~Ff6XWcRrNrzk=T|h|Iu`Odv9bG~6=m8LQ z!Od?*kh_nITok2T-EmWoQnHSPCR)`o#*VFkQFd$vjI-NW0o|{3l+t(PX^~^Z9h(5- z?5L3iqsy@mPA>9%j?j0BA)RiNGI%8L3kYkJy<>C0hMq;X>G}S{pE(#5^4KOu=&@BY zHjfR$sJ!icVoV-W5H(E7=jEAz24za?oq!Y=x5rk>$UU}F#_sK`^z4x1qw(={Lh=P> z6qs@+JVq(o$8r#L>Ne&4*vR2ty}*CN=MMybS;BG{4ak5yWo?;v! zeJV;xLYAVbw=tIR_{^e|Cu9w10AOSxnV1x%L}BO-lqF;;W;EdmDrRIMgDQ+EB-1i8782Bmr;gfMj_yhQ__&kAjGJQIwUEGX94vMU%fcEv;kc)7Ll$xN@+wA zSEP_aBy-pdIYg!yMk2D5Q~2p6!DpJ07epzQc$_-o7?;R&&d9{=U1faYaovTiA`?*M zF>a9s6GkpF?J;(dX^&BhCuon7i)EI;DZR*)bKF8H$#{Z=GM14IOI66YMj~J;j8e9d zJ@HObovabQkvUKPdL%AJII=k)k)OrX`lsKzIJoi^P%9%KNq<7fK2mi=DgAgt=w|#Q zn+Ww<#zHayRS4rEsY0TZjAT)g;Q5e|4C51}BTrBYBPE$hF;?=pNxj>QoN?#>S&Ez) zx%6az^yAKtwIcr(8J)uKehBzh)JgvK2mPa1k+lB}KmRd0@%KL@^1vzl28c}B9T5C2Zum1G3TgkVKLs*+*53kA$ph-i{~m}&+CSTGg8b@Ff*jBf1V8CS+?Sl> zo&}G8@yr+A`A6Nz9j8a6z!*fXA8V}L`IyF5-;EkkJL5(k0g1s*rJ2!xEKR3*( zsq7kSb@V-G_2eCx^Fq>&1^pMG5oG(h<*7!CcY@-3P&L-PVm%cU--luTgxpK;oqW9$ zeB(|C&VDaCc*{qDa|``~Tig-u=L{_~&3kN^^m`-wxNbGMAPGByUBUl;FXF4+t0boe zO!^P+MeljtOD6Ok!S3KmG>dL|%ZXM{d_RV~?0smZt}0G4Tq*>+gBO!lOZ~*a49o;p z@pjUxpR;DV^Q3xd{e2#gV^va%<}!Xm#`;iszcVr+un~Z=*G^viv=z00#YnF zmD?)SS+lc0fN9cZYi)cXP1=!;bM;PoW-%0GJ`mY|y0$Qx4$LdZUKrrx?%+)yfIb)O zg;H)1d;|AcH_M4-r`09`DJpKY);q!d?|~LC{|5|QoNTso;T}N{YH@MZE$4#b^P$DB 
z5aB!jfvCqF=o@+v`zW6MDtUG=Z+B|pDzbN>+4x;()?Rhy(&36B9p`2%VoAjiKgVXvr}m=lpebW zlgX8nQ<$8C$>4KnYH_u&P!5giEBD~pcisafb=-D#D>Pn6_j5C4y60JxopL0B3hB%T)YqADCH%Zvv~PH1bd0M&fo=lHnjdPlSi5ZX=!<#sR4rB z!M^*UhN+D?vq^*0Il=7xcrI6~O|ON{1p&N_3Q^_8m8@%qrnAyue3Kdx%ypUPKsX*XSEczi7J9oML8F%+8G8ZeCWO^OgSgtcLGL4&$HH{3IUO$Q zZ-^kiHD5`E?g7E>;QJrJ^k=lrOpNW>1s}yuZ?3H`TEXL&V-N8;G^O&emO|%r`Zs+P z8e13|%kpqXu#1Gib59pn)EM*KmwpV7p7k-}ltOHBE8Koa$GPRAw;?SA=Eoveaq6OG zt)+s!FUM$4d<<&&$j8w6Tz9Z!W_P5=xJ+`8r@X~L`y|SpF1|6dmS8tu z^GRsjPS)itHJEdQum2n$A>Hg@JUi9FnB*(qzQ?O4P-xGCW%s&6( z(~;)~x4mraJZ^1yrMTz?H@+BF`{hrevw!#|#_zcGX)!Ce}$->SdP4Tx7W;LB(WwZVB&qnqJ<6p#Nl$2r{Hj}>R z0nE8!cZ_*)XU_YhHO-zu3?>-lB@bc`;^zod>t~V8wE3 zOX12Oo#2$Yg_>LmZv7n8P4pjKt4W2mpics0_j8e>TyB_J7Rsc-Rnb}^w=^G^lSmR* zd>%Q9DxKd<44_kLp2p{~H{}Io4GX2QXD{+Amz?bk5)hEw_RBAXT93{YY_}blUx)D9 zpNA%s8yRDW+LsLcPtfFj{}k$+TkcGY(n}IWw!x;m=Zkn%v$7bk zC&F+5!S3L?FCrXknUxN39f5IxQsHH77!r5ybU)RtDROqOlz3#{-~0f zUD#xs^1_F)byWZM%zATyXZk<(^~h!1%phBn^c}(OVAq%MtkW;8q=Vb1G1^^Uk6g~x zq>V(W&sGV+?qJVD2vYXsmXOGvAZ>jX&(0&tzZY?Z)j50x;shxs7G=R9 zIm~EN$(>vKqg_QF@+_m<-@pb>#$xRpbow9I@$|Pa6MduBaEeSo0n6mZ63nBZdenma-U=x)DQmw^GtbK zz8XfLul@%XmK0<1io^2V__rf3iMmZqhCZ%<^7nrmwzB+f^choTbZ1C1-IZ8$Y}SdB zYZmso`Q#d($uYr8RpiB(WDFY}w)WnC!dy1ea%(o!69l`05B?`oR;f3?l>=iNP7Xne zlux~4dSh5)i+tI4py|uMgSNTWa$_z8VUPlaZ>5*#up!@xXq?{L*o^0y6@C6YFc^t% z=j0h?8rOXneLRvyzUuUNeKHJAkX{b&$KQJE!OhO7g;c<{vcu&9kv zOP&sQ?#LsgMWz3mbBUBmMFbHUBO_q=lhe5QctoamzJ zWUImbhcMcnM-jzK^}_0O2!$jW^a!`rSX%6d^Dq1WGmBR$+3GS|5CmcD?#_AmvhWGgUp|hnvGpor=SMfos#rO{RS#yOg{eOOh1?GE`F3FT+%OThuhB9ZojJ!?HK(IT={Wn}{IkhZh1;R6p zv+}5(FqPai5y;VXabOpjCq4Gx*wE^-reW8pfkF^^l7_zw0L~&P?uMTt#gylk20*5S ze!}OFVrbNI;2o%^?*18s5B(Gi>z3!5$N@jWZbCTDwYy98j1=sx;n7$B4E?_~HHJ#e zmWoAI^pDUg8plt8gtgxXeujmu;urDMklum-VgF}XSidZEMAWGyf4u$Y0GHCDu$fw7 zM)9vd$3C>?hn4lo9l`G4*#DrnUcIzfi8G_P?SIhQOTP}IebxT}G0(L7s9=SUqet91 z%%#k-ydj^^^hwfzUCP63&G{r7h@r|;)bL()l*C;o_ihwqK;1_W1Wxcr4 zp4@3g=XFLfK0bEd>B@9@Ik^AJm_Y6ctiT@BCRc&5pFr9X)qJ8uuNj*dt=D1=gKfjv 
zzl3t@wXMlc7??7@j2z@DYpuj`imm^%zr;lMKZ%w8;2ZF$d|6CXLe=2b5zcgrh8~8E zw>`<)9^hu}s;f^j1yCu0hf%fKm|F?Hu@9RFA^tG3S4kxk!OhCpxqsLl+Z87c$k~sa0cDA;8qn~T^YfnbcQs5xLL8z&zx|JBKSeP+D*>w zNcVBV(mJf3Su7U4J9u?u46kMDUVc8b<99{I5NLW{bt^O>2zCd{r;nWzEhf5iGmIB{ z^fU-B8$(smNb*UAl}&#=24Rm>x~28OAe&-}fb?**p;xRGR_YxikD?V9JTV5Cm09eX zvmwmFE#TeQO4>o%B}K6_Hun6W`w8s8vNhF+uQBO+PQw(HW^ECKFnkXM9|X~6r`*ah z4&jE=F$9eQv(5fWJ>)+jMa0y_x)CP7M>*1r)QNdssg=Ws`Nh-6jzs6X)moOB_A8=j zbz5|dm5E%vyP$1ZY-zZ4BpPdIPMy`dmy-_B^ulPVC-*`_rb3DmjYMT*wZRM-QvB_; znc70`piBMp=vY`M7L24)FNaIzu+-kvPyy(sVFq&^=Qf-yelwB@ww{YIU;P|Rk3#0m z>f$muQA#U0XN=*GbGvq_9>U&7&OlpMvPey@p|e#$dIb41Cv1h;?yl#c*;UUW$<35j zx3X-;REmKBW6g|>h(i-0r1v6nXRkD)Emhs4QVb1rW6^EblNiT%92kHLkP_(gylvlj|Ow5dKY^mYTh7LyowG4$S81$NLQ)gb z{n3T_WFNdMp}L>97b|!et)i{?WE}4rLg6@9+lV#GfEi>(51m7v66PyF4kS_GbC}Y4 z+DuDk3HJ5pLU{eT=&DwknyfGm7s7E4ze1X*g>b$1LQIS7X0$r!<55b>Lx|j5sjij7 zQYQb~=!<%3Mrh7g3e2_}FB-d?o6#4R_0YDDo`+_yK5y*I;JHVmyMp$|BhTa7Zc}K5 zQJUU=`21}*o?4v~mQZ34tb<#Zb5l}r$_nGqlVM*6I6l_inhFsNmC_z)DZG?QZP)-t z1P?r)3z`D9dy%87A9z1_=$LwNDn{@@oE zoT!rM^yZQv1^q7(^M5fqxc4W=d)=A4f>(VP>P&B~ zRa}MLy8Hqlx%sU`9F^k*W7l%?v5nXwh+gv8gTIX-Y5B8Q*739IY*mk6Fm{w%EB2c6 zVQkYb9D51p$@R%L{E@7C<3jYA&he8&R)=7P`=jX@L>p!r-??z?AZn#KV|9}m!J&PK zT%CDd&WEVAwGTU4lH056$oyn$A=n+rMO7OLx9e6j`vRLJIcRYNctI<^HjY0$R`3)m4KBp+eU5m9W=~ zICC}LmJxDyq?lF+bNRZ{XeD-T?dSOAq*f`ieaET+MJ_`Att<*w__i~9(bxev^PJD}(*{l@g2yjGe98B7lB|b&4x@*Kk~D-1(pCHvjiZ}*d0S)LJ3a#{Okl0k zi^@zdY}J}TtL4Vzmcd>q;4^R$T16)vJ+{iW>Fa33S<{1ByT~}5hbAyP`qFDTQ(If0 za|XM41RhCWE~RJG^$iNxsdQg-Dw)rCbP;0d&3IVO6PfUJsD;!jT)n87 zGmPRPH*WhTsH;SAz0II25t*OG^Xf`vR-^AI&e?gMhJ(&j!Nlu9V(#R|jW>+FB&xK< z*{wi0f9zZ$iky#Nl&cTRUBLLTnV!lvZ=A<5wvFWnUx?eG**GWYm2^9N z%fE6IE6AkP1r(YTCI5hQbnj7gw4SRa=Y$<8ewu1)XnQDIE*rawvkThha*i6%?BL}G3F~hWAf?aK7hcmTce>KsxrH~&u_><_ZVVOx=p!H| zC{h8=PG8ucB_hLD=jTe<9-IG+h}mt<7ME5+3gRXa^LMu7jXF9e84LojCnEHvNG&BB zX6%ALi;UP-LB;3z^^ITAc{G z=9r55U}|F}>kdezKTi6*fK$|&^);IaBu;Y+tyBo7F}ZP0+SE3dk*>%ac~nv#-(A2fD#|E-*T7QAyBcn)+nSNU=#j1ao{oNv6{U)AebIh-11ni;#oej!1=zrV 
zJ?K(qa&?O-@pc0vZA?$srO*%1^Em8bdUig1`E~IXcqG1B!O2GK5%UU+qVnzhLRhtb z=L$@QR>AIMZmu0dM@ZrDQe`dU5I=xgUpjVabR*qu4H)Tt`AhK_e+9Pd?DK&H2RVLN z-D!z}Ljx<#8T=JS6_*Ts2L?nu2RHU#7`i_o2N#(&}aF5{Yorit!H#MQh{&{ zcJS^i$6kzYcunS3=7Rg@Fd6VH{h;!EyAudJ8V#;O zU07G_tjPG7J8>V|QV~}M!RIPyaP8IT<<(bh`}fAMK%xNg?`y7xYe-A2S@eCpDRJ6es0O`C9-L<1et5*v$H9I1>w;(V+W&heQgtQ zoX}t&yap2=tZx-!46MHKTJ-zSwcDN`@bjHao7o7xRk(KSV$RMC7D zwq5f6`rP0P*CO8xYwnZ)dW;BO6dOAhoi8--{u;m>%xeq*tfp@!@H#R490suIR&_d= zW2?Aqa(g0lfmX%H&e7MF=LEQgcOZ*t1e4E6=#V!3yHDU;>_asmzId^J` z)x1|G&>?j$xi(V}lFTFz#W8&^F=f`n^}H*NW>3bq6`;=w8)m2g+&U6<`o-VF!* zYt>4aocN3aV;V|Bytsgy?KEqX3^WDw)*Gg`0|t&tG&MLP*`ez*;kr&w_W1eWK?&c; zfDEjDzQ3Li(Lj(vyr@oZwAVv0^tlX{P|MWj`z>Z+XU$-8VohFk8Hrh*fxfd+FFpgj zNj&HlG@##ct>6XIEJDsJW-wDqY%H#JORM0oh?JU?-ECGY2(PnaS4SN;-=Abr|9i8T zDt(M)`xQ;-hj|jW_D2ixTo<(?c^P?e7JHCulu$&`ym@^V^X2EtjYf!5-jc<1B&}U; zFc2F&oW+FeEwP_vpq&W7{O3scUyMywLMrSXIY=p2o{guqboeF`Qbew9@jy4u{} zw;ZNazJN(57y4Qg@q}m@0;Ko?Ce7FMN{s>R;D3mePTE;3I!>7-KS;r(F05{D6DHRb z5z`dA+FV~{wgCYkUJ)g0c}vgrH`tdIAIZb!@lE1$qjG9-5RwM3Eseb>c=@#eafzk; zBz%m#zPqY4c8pV-?Orh5q|)pwLW+6i z^>a}8N@Z&@$t>o3RdhfDXGKq!CO1eLyoPHpsY^nU>Grw`TF^9}%9k3A~ zB}9R-x+P-jgcN?H3JW)qtq$H2{0tH9P;KlGzLGIqFhYy=YA|v9+CP3hley&oUxT#_ zouv%|q z+h{;KQv@>UOG}fXaYML2Iv0UeC8Z}#mt0g>F*kd6P;YeLLwsH|52M`FCcHf z@%0x?$f_Wd2;PGfKe}mV=H~#RNR--YqThU0R`FhseEAsNU`kEk;Kh}pvHf+7N70J2 z`7VyLVAcdd_)XOIbU&l8vfq3esH}=zU&5D%DNz2Cmq7z-xkgM@$oEXfgYUl#+gGUC znG*dz<~3ZbnqAUCy^^n2zU$>Mf9gy{w_WGy_e!YrDESqyO&bwtyV)(*0nRA1Vt<`{ z)9u_~>pDz_)n3-z(iY7jSle}2+g4$!w7`G^Vq zk;UsF(1fHdhaqSEdgv@3Z>H>!*t_d`Xk)TkG27uwNA$7#mRCS$c4s9ahEQpbi_z#C zj`bN{o(wrANXMhGs$|6(E1w1lz%BE!buVlH!S3LVE;=ij>%G>-oqz3){c+?dPP#bB z<8}{h;cfrevEZ%qV>ii-HS*UyCwuNLem^~N;0ph?=w-pn7shTml_PlZrz8gsp29J_ zUDEzH9L0;nkJ(wg6{gPqbV#psMq{;1w?$xnY=@91gH>ur(&p6Sfq zr#Z5>kH3M!-?yFoxxFs_fQkG;91a;7o!sfY4td~dPVjxE1AOsQ7$7Po(*jO>e0Y4y696l9~gfmia7im&j=pj z;7oQ%aE7k==?@Bix^sd}(*6|A3SPlqg(7`E<-p+4vz!?0(2#)>ub*;k@H3toyotY; zMNB&Rql5YMh&&;l`~kw$lz!z9;b%WXczvA=@XQAZkACeWVYN=4IECYc({b{^_L;&b 
zais79X&)XdJUW?kh3NpNaJF!!Kt{j;!@v1_;gLhnM4Q*i6ZmT2$(}O2C6NbC{;1&t zG|2q=X~UyuJZ_k7+$kJ6tWaw`sdIVS|aD(Jkv46 zIE#p!{kL-rG5Pa!Y{bc*MNIzK8))g|4RBduhs*{Q@MoTvHJV~L-7DzQ0Fo;Zc0 zi7Og?;AGAx9c4(S@g|NxK;Ss}lZux}du)S@qq9yptr+na$5=hx zam5+xra0XCSI;bd<|B&>G;5v0xy5y$N*uv`3a1yBDd;=#2xIZtPcUAjH4x6ieWqiK z$r0Gl%JFlIr}2XTr*xQcf@0(FMB|e=(AcKY38!C@Q;l&1@ozrXn5VyxaSCS}7buRi zaW>s=I^TE{9&p^JQU4T9Ic~B5c(O+wA3Y$@y?g3$$D?OC?|6BYY=w04haSrwdEjJD zJ`N8)9#}O0p2G3ROVprF;S6LNJ8+)tGn|7wq6ZuZJt;ZH zV8>IAOCCMTdC4{WcnN@w_N#{`;|R}ZIy1RWi`Y{*I9bgx{D;E4nEYTrO;aKK89pV(uW-id` zTG&3IdGt&tG#@{pIZ4ydDV);m(2##}M>UV=SMC+C~jxsv*nlic|q=XP$O`-h(3d3GKdo}(nz_~)PI zxjW7W5z=x!|HD+zvudvAI+7Rv8wsB)X+QlpvOaf`SObT_+|17nwvSBx+(Q@O;Jouc z7Y%i?J0CANN3I}ZKF>VRUcjeaRJ1=r!ofVD{aER$#miGepEArE*lAZ8j^)`?&tGMD zKaXAc840*O9nyl-H?B3nO@Dku8BMYeHe04yVU6Hr&3Tquc4>_;$PNb!;Q@Hn!nMqH z3!F{fx*k3T1(VXY)R7wuFNz|?&$AP^FLbh#pEGRCgJ;&$!t;6*z5Tz0E<8HeVA;q{ zzG=Qw3#NbVa&tExpLkYyUZhBRw*JL}E`viW`P>62Nm2D|Y)K(Kx1SJR5O`cYJNT-x zjNNnCumTMn{hj&WL<+8F_B$8o4IGD48^Y_^l8E{U9kQ}l8!dMrXbl1Oyl5$FUTs_n zklmwSG(IU(ggtv_{lad81MY}xjO*A9Uou=Qb+$$Jj83r9U*;D{GgVEaa**x3-SD3N z{GAe^SET1`Y;l2;UH3D?TK3${#ONT2Kt zxq=?W8$zCUcJ!4+%Tw>(VekqfVN_`y!RUD3RH!QXSONNm=~-j^G~0cbp$^VvwsyE8 zd|bf1UuY~5yQ3|Y(|l-7cIoTJ0->ru8DYm>GP&42zd*Fm$w!S#;X@~^U`MtXm#ejJ zxa(im6h=m4V-1Uq1#IIzhM^KK(qCf;;LR>4aw5Az z9+wTd)}f}f`Q1UMj`gVG7(R24;d6QFrb_kSYdB^E+5_UUfYG-wXX+)2&VmE-h_M^1 z@y?IMAHn(F2ZXnS_gI{V-jFtl4bf@4kG*rfrJ2v|O4jtCp;DqUGpz4kQ;|dky4d3n z8uo6S7r-Pt(XeP`alno2*2r;=yyyyYsry2(-XR6O!!3J}h1}(VmrLb3)w5Wr`6knz@;IqAgw4sKlXI7 zcS?-v&N4XQT2yCVbHiWG+RDDP-?&_&3cc)|0@Hel3P)JyF~i>CTolZY0>Najy|!p& z>aAl2SzoB;#|@nZkwEg;l4cP}htw@dD0x{+N1Z+V&`;FE^L-_*ReNYnFb3ZURO6Cc>sUD5H51`VP;I(R~QL3FyKa6hF(I#|FPXKH zZ@;miNTQPM?3@#Zy(RMm`(&3rE-g>};DlklpzZiyzF^oQsBnvqJu2z*r~@u)`vFJ! 
znB=N$taiiCXp_)nuxk^z?x0a*PyGxNsJ=}tdD(Eu@{)OaI8B zRNJ2nKQNTc)6sz~F)UiXHRK6~9Y})ib@_cRk3HnJdAu$h2_$v*1-v1fdN8=Wf|&oC z3b(Rr|I<*CTK#9kj|?SVm)Gxub;E~@@=lx>&l#@z-G#&|}L@XW+$brakBpFc>sb~LUSZP>5 zJS(wd->B8fe*L~-4Kx1Du$Nu(xWUXqe=}4svRdWQkTo$r!W*`4W(N-o%ZjYlPI-7- zsf@>?@g3IMQG6w#So=bekxf&Ztvh#G?QCqFadql=qw%IZQN^>&KEVWL$;KQwl~$jz zm@_vlq&U@_KM(TNs zF>DkGSCoG%O+<)O5V|O*n>wdBCbmW<5O4cft!XEFd(^lzy~TdF2;HKW18Fk9qU4{r zo#>8IR@Z0HxXMuA-A(_U@c@kJj0N<8$k;=Mcf~Y`|FP zor_t{GVU#zr@sw5%5ZvbJu<8~RpDDz z7#x!o)_i%siwi92oyx@miN7jKuy1d{?0X#nmmQuIHm~g;`1`Pf&tRjTwbf`eX1e2r zsAv&W!R^M!^Q3@^8k^lG!i{BPD+IkyQrS?=Xg6Z%vgzPY9hgJy#YU%b>&B2%35J}3 zfK3T2Vf4x#^l{P322EVhXZPE@LANu=?%B0yCA->bypIh$W|(4|+{S&Go^4Ov>oy`F zMTONpaRHb3R4%6@>uUX^L+zTWYlrvdNds|UK2__ti)&b=dC^w(^bX7y6&Po~++o~S z>~Nq1L8lC>Svc&lg@S&&GoX0f_7D_>Zs>&EK{*g$CtAhjspfNy4I0;FwC$cf&-nE` z4aWDKXASxZ(t&)lJJmX%v>|vA@=*FXpIyq2Gk@O23NAHT+1vI-1;z7ok7A{NHmpd! zUTWN@cW5k>zm&~%8keL3-6hOs_PVw4*y2fDm`R<$SjPa^o3N zwXHQa1YbbDs*qktJUF-qeWiksZQftFnjI@IFzF?QI^(|L`FUxnT=(;J#=Ja}#t2fE zl^fR>*d=#aq|~uS;~jY-N6(G0%ihdSFSAJCxpV2a*8eVFpxeXpYNZ`)_fex&tnKLU zhO;9iQIhS`W4N{rW1zw#thwI2S{I_J{LGEVAU7{>H&){aeYgWzuKySUZgGS@*KWLC z7sR%88m*FsG<@RTtj-hIFyD|miB~%=wAaD${+-X5U8>|bGiHZ-^J^0eb{y)5V5ZG{ zazA>_OJa;Xns({_gaXN5y8uv~ZuaJW;{_W7A)DXh3d4*DgR5+F**&t&X`kc8#_ac_ z+n4qkKWP*RfH+4&B#xwF5!=5dW%Ek4%fJAwAFF{k`(1_}v46aLPn@A#b9#!8Uk6#y z*>5b>3tsXI%WC$@Z3|5jm9FAq<&`D#bf9BL`xY)wMFxzYHtHzH+U6zUQNuZ&yPu`W z=FadKE`+>~J;|u6owZz^KW8tjiDFjug7@X8_X3qquxp|q{^Ygy&z<&&_@|0v#s)zo z*k+wXQYQ(rS-U9_CD$ghd}?I64*u`Fliu>=;2?YKCDWWv-aLs;Y6Z`E#k875|712v zRJx2UDU!}oWpnS8(NQu_k3n@xzdU8U&L|QfFlU14b*3I`h`@8FRV;1t0X~?>D!=GR zHKlFCQ5(DLa^v~Z@I<+*3Q;LD1v}^5StY7A&idXI*Xvc@m^A`DNv#6H_nQ)r5)Arn zA(tII!sAsua!`S$#O?6-Z7yUIaE25)1n)(5^n_t~%5|CXDx;3noV6cB!fKu?w`eqC zAm;6luv;H1*v*dLWmqjjAO$L${dC;_`Mq264Ycn82p{WicQ>Wl|v3FiRv25&IjOOh- zj9UcujjtQcsr*C6F@ugAp1m-H9iBt+(;G|-m3AWLbp7SVUeRCe3x-tj*fwhvMR~jh zSPsw@LiGPK?%<@6T5>NIhAK^|>p-4{XUQ{vVXhRc;m*P-Q^E> z1M)v~*k$XtU^HL6&-hofNYLi2^gq;5`+aGeDgr`wHd 
zrQTSy&gWtF4GVV(;}c`Jy>J>k=bVrTO*Lbz_?U6MNVU|!(p9XWPzv&@Ah<6hsSa<8?V9JqBrXvU#`>NDus+S2T`}b#o586UmeU zlg*A9R+_?2n=^ugPh%ytdKux2DpxVvFD&aNsyoTHK7%7ZPp4*8QrqyX@hM%4^Sk55 zAA)NTn*PCGn$z=7%R+ei{2XYP1C8!ZILciO>ZOrnvv9jV2K~dsX7#v;!>zsHadz-^ zV@pQODN&_r_8RyyyQaoulBiazdT6Ld4|`{MzLnke6XQ_vKXkfI`&_ekzKdPe`BS4; z(sCTg>k4xipGc_=K3vNu)l3KK6ji2>S}U7JodAsCj4u+d8HlvZJ?`(ljs?-Gd}l4@ zU#mh#L#uMbe}G&$!ZyFl?)N$r_`U|BG^$v?> zk-hyp5G$%Yzz#eQR`GP5*(~zPRqVKeA!&8@_bs%t+kcN!F;CZYRY%hJmhnN|d~x-g z#-piei|~DeM0q0=Rj&+_1THNCC5_PZap_@6-Y$6xc!B(sNF6;@J!R^8V0d-_lY_ft zQ>tIRi{;R2?3-P%nl-%#7NS+#_a4X zZ3v~0(+J%Nx1L4T3r9r)>*KbGQr#2>1KoP|*&^W{9k=f#Q~DhYaBEZF+aN53W=#e0 z%bpe$#xEn|;Re3evXyB!*RZnFV1DQCT{ou(3_p`5*2<2LV zckYM&?f7zOy-1}vRcg_%z2Dj-OqR?u@% zlz0=>tot#$@H4%i`<=pX@oz%ZA_bDR=Ll0vyB4VXjeXfGR7f4uGtG!IChJIDTWNQ* z%1Zm*kj})!-Rcwe>O~+_u4dNh0<%QrTG=^|1XR{6etUgX>ioH!GVHe(Z3= z4%?yp3fN_b*A}*iVFXnaB?MR%m?ctw@Cz?Vnl|ndlXw?9KQ1jvU0EtrRF&A2RX&FfYO6-+j)8w5bh12$+X%jq%0yik7?_>6KnS z%w^P13TCO>6_6_x_W4f=?}_wM6}nD-+H|J=F0;;oD}*~rcu?3pV!~8bRrM*sleNyN zf4xdLLl-=9S2uOtb%HmurnjeBKO+oe9m{{Zlx~Yi2}bxmk!F>SCZ!lrHL#gk)!aiU z(MZmk)A^)yBP#vPu0l&mNk?**l2VSOxfWkaJ?Thh*BsZ=j%bfE2UJZwlCkt?Ye@M= zvJ}VKG0nT1AJd8|Usq`sQyvLL+XEHs>vs$9=`U~naG4zAkJ{UH6di>#W86NE*vPW8Y0Z`X`H&$D#_%A1D9Q zvXJdo#O2KNo>;{eFBbu5e?ER4eoypg6*=%dkm{GdA+BO)tPr0|^?qM?P|`Bz;M%4U z#pku$IXV76ng;xBE8Fad8pJo`cq-5_@BmMQW&PnSU0?syCF_{MF?m3Yz_OKNgQA} z&fw&S1T(98RoKOnkKnIQd`nozUVT;YvC~fq4Xp2^uq^#zJ^RwPggvY%mTzNyXXcw( z(;#R!-%u%Rs14Fbx`wH|`PyFzLkmPo(35uWA_71;dUAwET0*|+5@qVK>x7wBoRamV z7bU&gB4z5~Vhi6&wAhrYhpP_KTZYa)e8IdVYy{7YKqo@^0S&E2D| zKPTNcE*C+$>%22v)9jgY+$VnfuE`>fgvSP3VEzH!=C2qO1L`VE9rTJUVd@Hh(jeYZ z;+bFMtZEk+G>Xe~2a~yn9Bj9T?Y>$(kOIps!#TYWZW%c5-Kp~zie(1&XbqP2aEnOj&S@b5 z-#ac6zB5;$=XR0&4OOUT?SjCs3<3vxW@C3ODAOGfao5?xzH>wVD)!f{VvW>a*VHrM z&XNW~Lyew6qWgvAPb;-c%q+f5mNoMw{`}J2+|_xCa+kjm&iyrYB92r z-t85uOXdc5e?)Azszh}9#6Rk4ge^4?E-TL!zai;(zAwy?x(L=cN9rQ~LVr&~MX#!p zCfRu=+1VNd7H_)at1HoQD&=BIB_waH#MPA|6m9bhEmxH~9aZ8B`V9^JeMr)fg|no8 
zkzj>S?WMb>tJy_-f!5m7)eGQCQto7~l2AA=WXFF3?F^Mx5yK9-q=ga{Z(>cYB3w`A z7u&8%7H_tSkLkPND;*+K04mBcx%Ft9L4`P0ohP)7;Ec8m$@S_+Xsz$);_0^1^LgDq z%%@fYO0w1L3_rwNt(frwaqq@?1=F+3E`-D4%@>HjGg#7E3m{*KXR_e~Vm-Tb6w=7w z4~V6y1G4yiNk?AJqT`YVyR1E;-H`NFHdSVBQsQ9DR|)80hf#Ak9UMaEsZb4r$`2lb zyg-t@JcOy8XHdVYRu~@^Z^@h6^Orv@)~C?(>&=pe{+!ic4gXm`iM+p3i>EdQ117(? zOM&4wWQEF_>DVxC<-2CY-FmT4%wYR!#lA6v?fcQF0y&@p?BHr)d20M3@kiSBWepXN-)0gi>zYdOG-5#75H;;u_O=k^&JJ4_*|hj3mS+6b<%1yE>79|;z*G{MGOpmq zO)WNk1!j1jm@+DKIoC16< z@Ya{JzF!Ds3?jn}yI@29IV^gsxR#x$D>$7!v%fH(#{^i|Pxlv=vbUcxZ)Co{!b^}1 z)a++(^%a)0;~_L~?2E9+G<{JVF!7P#4?B6bJQeR6N>WcuxA+ zxvcFs;s#dzHL;Vue}QF7>bC=hwRvoh!@P!7eATpug$4_Y*-!2ik?#7(CNq2r4-Xbv z+27xVxr2tyE*va8gT;J>B762uah+c3cXx_g*vN0i73}Q}lf<66OEj}@y9>`|UvQf1 zSm3+D8g}HXCV~C@Zn4;~4{-TDbo7X)a0xqpyF}m0XU~6G(%)^xho9S=58sXN<8~ME z7Or9^?^64e&q8;@3F-N7h--5jay(>O$KTnt$4>V2-FlUIezqia=pOOQk`z=^S85{G zYx~Vx*axMB@TmTpbc>$GG9wW_ujR1RG%+atV?B$dRw#W35Gju(H(aD8Dt-@;D3N9Q zqE8<@az6q^D2-)W#gLH!wM3R#GN8b-Sh6FF$QP3Cft1BEM<{&l_vRN6dxA8U>G%;6 z^C*2KS1hMHmA)}(nJhDAK$TDG2`x2BLmHprABay#RijCFtnpC2xk#$$pK{j?v0E>O znWf-SvqhX5Rb*szQ^ZiCk%bs2lE~?pNHSHPr`WSXWO0@mqQKCE&8e6r$ujGi3S(*&(LC2_2aY8 zh@UsOn;+5#=Yze?ieosQ#aFKX2?(}Uf^?@^iN}6pTE))% zsTePrCo+vnu%G#<7}Nc*zhIP_QdhkyHV6{sqC~ndHFkw~pqh#D=o9&A{U4bH>MC0s zRlh>=f|%?XuK|Sq>(Bulc?UXxmb&^f2u$3!Z1I~AXY@Fog~()$@C06aMD=^3nmyd* zrbwb%33m6J;^&L!g_{GwP&WkrD}E_nsteH=OPWRXjgnAC>c8^+)m6tTA1Yy8pk#J)%g7Pfi(=p8h0D1Q18Z2YV-2-nv3=EKQd8 z4os?N19rqs0BlZHMpf_RJh!?!Lx0CmsL~$xz~AAKMs?`qBC9o(zfqK60|t;=!0&Vg z!*B~$c&w%`=mzSaKkSk1VW+IvWP2bSWb1e4uekpM>4+|SV_{wsu_j2%lI2*Zu8F{t zvoQO2#q=eTB-yrDy@I3{+?NYL!8~;6ddm)W_#$bXn=G;>OfV3#H#1 zrLORdgQr(i^)eu*YuUqnmOmo>V|551*l826Mo!BI8%BeT`f;NnS4BGaAb-QVxB>I4 zoG=oyRkFM{7LaQ@#p11G7p{;hB`P?`U0BrHhsaA#v*RnIt6=E0!B5Z|cDe#~xRr;( zA@~qFJwVa%d2Na(jqoRbcD;1EM5Pm)v}>0nDi7OSk+ijBo-k4BWn&uYFVl|E z?6s@J7g8TPOM2a?r=XpCSEfv~oa3bl7fo(=%A?kRVjWK?A?xITf~X_BNXExeVzl4d*QSh(BmOob5BE{bQk7O07lYE~S~S2` zdL-u(Yc!_BWopzKjq}I*Jkld<*egAOHgpKg_^B&{6!yPCb-D!~nh7j9yIYn9qBbn#Gm 
z#qYPf9I`7MRGjv(ALs@_n?I!3-C?i8&gmDz?EDU&6G3`fr1(5>*|?w13IMwVyogKo z0I%N>4xnwfM{y|*V2uR>0T%~RK};+U{VN}@Xl$%*?O<EN8oe(k&w8 z8skD<+MYu9HZ9ZGoO|0h1WGoh8_?;>rX?HGN~W)Dl9p>My9Ki4QogZLT_RA;@;u*I zjv0_HZj^E?$ACu{H%d5`Q<^Sslyi)Gzs<4hB`EP&ws1>I`VO9b?BHgp7xx*uw)I^j ziJSdicgXDpMupu0nU3qNWyHbz@EgM717Tah6?BLE4rdWNSYCihUu#*D*TL3LBj$ei zo&`&#M5))VD5}q+qb(2%&3s50j;D9Hwi($Xv)t?(Bh%FgIT%9N0BKIe+VDUSnE-iK zYTFayLaA)LORgzr1sCKO%?>xaO3QFSFDe>QMgj_l*EnaY=)i$(MTB#TcsYjyQF8&G z1HiRm8{&jOOB{ZL&UxHEyUiAI20b>H3+I-=VBNCa?e_VBavK0_02MGVl=8!!hUl6`j4p+eCas>kjZVb8Ie!#dXo&b-h zmV?1CC&W`&+6}&tT@GP42G%<4epHq1PP>DL8p7LGary!-A1)tGFUxz#aQZAU5C`O# zu>62hd{x=sUb2yArYoaTRgcZNNit2k`|$6ACIpU(kzjhJtpt&*ig&Ehsj) zCWIj=1BN=_SKxLW;8Eck!}%y&&qbIy2uyC-W>XxtfS0wbUbF&1;vN}D*K)|_LI;9j z#7p|UXp^5bpT~}7vpEh|lNPbO1s0eyZ-S2Qts={c58*x~?1RsWPwhhGQ@9Ur;s0Cg z=f1NoYuLZbh38l}lb}EB86#paTee%8ShrxQ{@>wHR;R_4^~n!@WG?xiVN=_gu)uO2 zE4WYG$^SAV4Vc9~f38KDBKc)EhihqhA_2XT_5>q`11(pi#)`<{fKo*=C~=5eWVn`= zDRM5?5-D3Gx8PBE;#!m}@>I7I$`_f-P@!&=v~-b~hL1~Hx=8I&M%Ae1jKl?pyPT5T zNhu?9Zf+Tt6>ur8U@!zq6ZW|rHjf_|PBPA}#~u!OfWoMF6|dXx_GYszzsj=S1Z|a+ zhK|vRYHp1#675~RZBA9w*w8TL9PH#AX}#1x!sD)0Bo@c`Hcl11KF~8;KnZ$bXjR+}F95DwK$8F|54aKC!1D6MZ;8|~ zH5!DtL<{d7cM#jbIF}HqhdfG8<1*AsvYe}2#yeHfALTcSEoQLB+7AC{t9t1;cKgBt zNgD_8FB*dCIM4Z)qz)s)e++gHMTO5o(*tSSUcV!(1c6x_!W6o~VW4n? 
zo$inmNZWw<;Jlt7vXD>18Ilq1?+EzqA&)Zv0A8oZjy#h^4G zl{?~Yo6A0hZxOASRvIDl{q;NXBtyI{;zMB$C$mKfg)f%?0OxVC~ z`<778tg-yfEI%RPV*8+AUdUNV-@c**ZhZTS`dD8=va&BEq%+x(bMc*}KZg^w^V@=- ziLaT?2iQvArhuFb4s5dyD?BUa&xNzuzr1GJ&ffm4>2&sTQnIi!6ZsNb@-b5lb8eKp zY@;Y4yzV=Kz)s$b`_STXT!nUDZzxYKnvgc^~JhnqK&PY zlK%blG}q(Zwf9+vqfw;zt~)I4Sd#sU%&P1&hb7ly)y_(slGkytTaHL)u)C+FVwHhC zaRf1(4M(JvXJj`Zqva%5fwY!3SOeopHhV-`U69=t`|J_vZ1&s{Y0ZMHitN`%q^<14 z3_fx8tx_bn#-UrKqO+_NgMW^CrL)RV3Qee5p6pg$xm7A*$1jqW<+T0ti=^!=aoe0P zUUQqYa*35xMcGX}cpLg~*~L=v%B-hc9~D>BE+p7v7faq%*>C$cQRnt2A~VW1w)B9s zWn1>^xRQtgyeER&_q29p*5_57@cjp*(nZ-dpi^Q;4@eD%E|GRGv2Ief_tPa-tHs(b zkv5}y*)>Xs6VYvSMQ1l&0&IqF)&qxbrVAdYC+`*o)_5Z_L!IAgGO@+8QX%u#o2QWOH}5aReRxKKQ)YAMNaF;4yo-;z4(Gz&jJrZ=@t1l=gT- z$hUh+ut00f70W~hkQm^v?3hB5v)423Yy9#mRLj*fY{{dhG>#ozQk(r%?(Vd2S(*rm zv5Ijwa%VrR$4P4H@TpLPCt%lUlg{%mMwEt9(_g8YQ-)guK(J%^*Glh+^qei(p=qM& zRoeci>B@n+a0g;#y!@sD6evW?LmmfYX$Th~m)#KxD6%W)@Id_c_|(YlZyuB?#eYJH z$Q5xpyXV5f0t;W}$w(+Uz<$hd=8yCiN-Xw>P|iN~El{BgZ<1`Pi}KQwa7lgoCTWRI zsxpPn`>eFroTL~QQMCJ*eK+oVm!);y-h0laH8d~ zS~q>jr?O}SEBvCgg*|bxv?_->zJ>=|GW5&3)v9$h92rhRHNI_5cW?Wmw0UVR@%i7S zVgsKrEoau>3MSR_c3L_R;X3*v;k?hQG#U?pl9JVfct_W_yPV zH?rfK^EYt!kTX(4cSyGx#XX70Fmjt_c~*eTJTFd1gRRJ5ne~{wDCmA9JA1=Dz8nJB z(%ERANo&)nDA(SJaoLG1*4fRF^wliy3HsA!KGNQel4c8_6TkP3>r{8`I77Z$d_|HGVWTBNnz}?mo$NbWUuqCbN*-ej2_X^U` zjMp)V1m;=NYis=;4{~W|0icrEY#`xi8_x7Bx@UL{X{0k#1HdDZO+Ez^AC+w4s7FPT zR_u+Zq`e#+b553$4@*+QP8_hTNFDg0bhRMLy|$|HbYvo5QEA+7ivh1Ld(NCaiQo{i zDKhAFRt}USe`>CK9)+2EpGWVg^e7KAFOsNejKOJl6?^1)sjOt4X;E0-EttXce?NpFv3p%H1uE1w+ZWH#hzsozhAHm{!w%t3{9fUfNeO&!jSH2zt+3 z(h9hWclg^#QIJbZW_&GS2iHQUBY7JmULW@d*(*+z#-}SY20PUy^rm;ww+>!t526dP zoKyS1R?&I04w zn;7WiJCFO+Ow@#7Du;PgbxDy1>(U-ljXS7SdZG`+5wa35M|$`{1WE_Y9ZDwt#ixO9Z30|S%o z{LFI?xGHyIuqMQTI(o&S!RdPV?r0FW%k_Z(qz4eiyi&6YHKNsdbqbL}`J^$kvf+3J<#K1^DO=%`{b%%l?OAHYC1jYMZB_{TG?ZMLGr=pc8h(L4fh#h8@iA!`gi zFuXlJe5*Q<(1!2@OQMI=aAqd^<8mm1*}}zS1U7Y5^`dpix!Kyq{dM435T3<`;ys9r zVizc*LGI7Pf7idjCmkD=KcZQ)@-5DHAho#78cFgl!MDF(8Rc6AE(e>E1JUvR0hCN) 
zohIe!#Ad8yA`<7jolgN4A3HTTJ~BQm&n7X@%1c7C%{5Bd&1i|YKVW7=EBkAk(8$Uu6I*rst~^X-VTeb*S%TmJ+@*3 zn+}J>Iw8j+d~Oof&^UK0;oVpN=tqdx;Rjt`BMhC7fNL@72CCq|VINFz6llIbl6-z* zavbk*CB#5xd3$QN(}ZbEEGZji;~Iq0MmAN~q1? z81`w!l+b>4Xgkx}Fa2nXHI6HAsBaroFr4bd`}E!s0mi;SfA-GuA#Ahq0bPI-4x-W* zB9gTCY(A)MR{r+|+pP@|+CWs07e^(%IbMPeClE$?n^=u)RvS(L|2aoxm^Irf*`FOB z*#S2*l>?-w3D71W3p7z9Q6dB*@RV063by-b2**DCc?Di>s+$}QRGr!F){fEP$Pm;P zI(xxH(CVuc3HyZenaC_Z(xd6d5;&p5@J&*zquSO^-4Yw_VmurBSxRF6KLZft}-JS7#J&AS+j zVu#_dQ#B$@*nCkUmRv5#e3c!Yf=c+li}N?nW(dS2pD}!RM2>UkGR~@WIYOI^Q>6)< z^HFPGWE2$5S~d+*9G1auw^{p6ElQ_P8;jog$$_4p!m=Z$*d$H~-svd~KtB&ZY-cXK+v9Ac{o%U%EKBCxP2{?h0 z7{%R1RgQt(q88yCl}kZ-OF9d~1SS+tC6iF}W8ZW7kr+VfkhNbP8IiYPrjz)alMC&9 zW|)xbXUDf+14Ol1P*OPJsb?8d800yPUve9RM%O{^l^_MjX={}c^EfTmoolU0nIsav zkNQ;7+Q;>aIv?iGH-)C*LJ|pX<{geHR!cOjjSdND#HF+7aa>7`$BA*vUTb@ zxO9$#y+y6wjr<>=i=pTwxC<5rBJ)5bjP1Y|Nt4FX#qq7&=F}}08CB(6F0Jt{aQTF9 zIlfLRcjpp35_e)fam2<^iZ*F(TXHZC%0hy%GBr!t0uopx-cT494KA3e>rGpmKR*W@ z=S#^cQEprMxg5nFN#8_B-co7xcJ5!QkwAVch=e%-k7Qce$X882ky#fTqWXG;?dQFSL#L@iIG@DAfHELYL@+kmpHaw}xZPkTAr`5Ji9FdMjbYrQ*fwd~AG_9R+EAmi zvT$5caBlf3(rt*QonGlP#fP5&$-vS?ba_CNpt%^iJ!SIYQn;DV1Fw=py{vGJS9$pk z9x|H%;fS;0iu^OH_U+p=wQt{MlJV6qWl6vo0jDPF^L_ZUX@vj1d56{7MYk1A*jJdv zYdUjQrPWp{c+tLnBjfwa4p?{U{ol7Q9G8O^*$-SaasXwJQDpx`k)8Gf7xLdPn%W;Z zKy9P<__GQ16OsNAd4Ji34m5&?ncA5aa~fG~-9*h;&$jAc-+ZBi8f8Z+^Vh*9!6!%i z07i|Gu;Z2at7p@nRASKWM@O}u;NV+F;kQi5Q}oIp$do~4db`%pzI~8m)eon;iP4}J z+7$4ijK!1LBeHJQo7=ZfiOs4X)yH3&b%J;&H)&tM`^|hm$H z8c9oLklP&0okna?%Ps(lV2Hw z#wQKAv!4BQx9J|hfi%y+LoC~^%~WQ(SFDKpImcm+Fjz1v|00D$&BK*lS%AUQ zj@GzqM%9VO9bJvC0Q+u@={Z=cO*#Z+a<@eYVBa$#w@!i`Z4orcG>Jlj&3J+KUm5mmf5Z^S680#UVtd zey1MXatI3)-xUUGJN+h(e2;2&nYH%%0(Zgwi=IW!q(iK37agc4g z@M>~CkLCzzl$F#O5Dhr-*W6cBAu9s6huzY0lgfa$s}X3{L1-)yec9yk_3Kt1GW-O~ z+J@|<`Xkc0)(EF$14aQi%X-!@ROCd#l7pEsCBLQxw zNQcRhDY9+6tGYQyos2QOAe1(Dgfqad{?6LQ2Cjfb?1)cTq}mZ48N&#|yx=&?sE4U2 zwrbtJ3S5;hdPIEVF(%b0FdnT`{6SPCv4dphPxi(zB#fp`&``gS9bP(U^w1= 
z!gLzjx8JmJYep_Yk}JfgL5Bg+C#q?7G+7C3A3`TP;~LYZt9z`kCI>t7deZ{-lZ#ESu>6DS?zOOn zNz;RTlbQv`Hb#rT zv|aSmap;g~ zMW$B_7=&Fh*=Z0d3_{dHjB%Ehrry59WH!v6dZ1>M`2XIiqLW6tEQo3FY_RCkpLWEq z`>bh;G4;()n65QQ2x{%G^y#OHHfm2(JO+Sk4pyaE3;1CZVu2=vr2We5BEz(8z|-4Y zpJo099G%EwSDJQ9ykK{yifX47JF!Aq#r}MysjS2m#PLFgOL%_5mDS@9`n(~Z2Tra* zhYMjd-hc z+3I_-MpQhedSp}42)p85(>@p^eF*LF$R3wnv4?!H^0~e4kjLgl)QB9Q1b?!OW5u%> zTJJL%^X3d@*Zm(om=Swv*^CO%nXy7WDGs-Ur<_?S7Ste6X}Z1K6+})?j@>X*U)ni_ z2bAN_^#U)0M@DZVbS$*zFDi)E+d^g&0Mb&*StXXv8TaJ}b;) zRoKBYSejzrHTC9(Kr;~Wi&)Ph)7sQEKQJB6V{h$)U*M4!O=q%CHo(vH+F|U%BcEQd zi`jOI))a)LU3p@6rR;80AxX1`Ht9$PTX>K_niKG~0!Yao8F>lFb==+>u58G{L>Qay zAMeqUVXTl=u%nM5U=k7FFmkK42-1UUK|VQ-p;HYPPe@Y4_8vF+OI&t1!-riSgk}1C z2n9mgGZ}EpPRxg2cDms-4lA4s0kxbN{M&I;FMIC0rsb)+6Q)ZH=@5J#4bG?H>9pt3 z61J<;w3xm5W7FmHnG-46*4|r%K!GgJl9A|O_nw>`Kzp~htg5tyd-`Z_myHo+2)v); zL}U#8KYtl>KqVU((i0U)R7z$lu?mh80KFdq(o z$QD505~VL;Z@pql@?pE!kylMmrbnIeTt5Bh!miZulcq=WSl@#{DS!LprVZ?zn@yir z$9`QGkPt5&22AF`Lr5xl)gMj!wXM)O?De*uVxSkjWgUq8rOX2A1{Z_nF>ZcMNqfac zS7qw}uaiCC#@}GTRH!|R>r>HI29T1yaQDHU<|*TB2nj$wE(D(kvGGIr&4nlkw=3jE z5L%GQHy5s8y}va@X`vYwdNm*RmF()@nY`(-Q~W=O=(&b}GDIm9bhfl;i2RzzOS#7j z_pR)}X5txDzE&Jzo^$Ndqo&QGeRQlo8}yBwa;|z8^oSQk$}w`vX;q8qXcE=MVHxrm z;NOrGR1UcTe;%}9twT0Q!N@cWH&kqE89Qw|_Ve}csSCTDZTySroZP{IvOfIJ1}C<) z4<_NG^DOe~X!y z-Rv~UytgD2LQsW6aeIP_A0Z$ldj_#dLt&&_4|;(o;BX>tn=%2iuSw>Zx^#Y%dFy|F zOj06f_jo39j^bFGeH6{76WeH;vMbtMJEJ=O(7*;8o9n9hx@OCxv~2gDX5KAP@ey^| zsOU7aF2(Q^gd*CI#?)@dDGY{yhk&?O1Oj3qaS&yP&xOc9w}-<}tY8l=HUCs|ytsau z`H)cx4-bzg>a)8mRSYx_O^@l1um&%P!*YL*nh;EEpQ@^}spp5{Z+d!s_8`!9RRE$! 
zM}wjOuZ;r`U9-l#UUZC>I$JVAHeY8>WzRUnT#0W}v!JZ2*}88c>maq=ngxEMHGcmY z2yD`tMjD(ViCT3sVXgT$CCIH3wqw)!T>+m>=727;3@!v1e8es(e!Jaa4}J_4h9k*Mn^R>w%-=QT)vuA zRas`f9bSCpW#vO@5r?F>uBlQ)JE>_6J*lx8GcI|fl`tu(aY}R}ov5n`wyU`@DDJk+ zTh>0q{X`HUwFeUwA88-8XJuxJh7vVn4BnJZM&*4yLDw|PdmIr|$2tm4Y*(FmF=B0R z-30IY{yNMieGqqGa@64;o8*pAtJ(4Gm|6N-6+v9lhXBFOgJS3-4%K^*KG2bxsyEl= z75n`@ryT*GAunQav4gw_JoX|~7tk!o(;eUuPHx#n_p}EZ%{S_DIP%qhfptD(9%RpL zwJb^9{j7Oo9{WiMC}fDv&ri*M$vkFE#Y~p7^HQJcGCwIvQFp{2O$V5h^Q+v|=5{kJ zBR$2p_cT;@_aGl(T6v*?laGhm6e8u^J(WTM1}40b7!MrFjT98S6lb_WSEghfWLw&j zo%K0d4(<&rb&U7);{vCBkH@h~N^R+6b#@nyi%P5;@pl@2S~@yFsv3xmB+`DSBtnk$ z2c{>}u2|$5`{N<=*O9m;($zc0g?Hc;iwn%6y;`XWt69SuV)6iP1_z(cTSizYL0i;O zT9x|Su(>BsuehYDV3)w_sP1NT9JP8lj=dt)QNx;!)=DcGzc!@qiDEC8K=2Rnz%zIW zd3|=59Izo$E~NN>T$SKUO}%%sMaL_?jDbzLg$j@Ln$ zLq0=DsuK+@9zFDPe1S!*>#75zxk)R{>WkRFgiDcCQ4X0V;t0 ztu$Siv~O{_TL0E%<^|`AG5#H%AR(7bQG#~@SGw_I09U&6X7ewL?M@%U6@y`LEX8g| zU_PQO0bSsOYnt7Kz)8qk2tlR013U3~^T3i^BY4pc2fOTCK!P6olwk=w=X>Vs54~@0 zUbHQ@RBGoJ%p3J|3LsBoHx^rVq_B5W(wz2?e4G^=me$PLxmC=enK@x1g+;__*Q{&= z5yVYd5$?;CLI1(!AK_>`2!r(e0D6EKMAvqcZvvi#V0Pi*89*6AaVXKbOBcCCaNW5I z7zD2>e9i$QscP!pOdr-;Qq3Yz?u|g#03#lvH@K64_Tfx_$WaB}0oDhb$#!dn z+VLG$N9sFYF;^Jb-CvOa8oE^|O+l4@(jb<`dK-c0tDQPJrD>o_(LFi>Z|Lk`an2N^ zszF!1oTk4-8Jc2cusrF+LZ=LOiqlnt{pH!DLD>{0g zmqgyDgoxME#&k$wA^cr zrM?;23>wACpb0WnnQCA6F;2qbQWI7Y#S_BvfZBss_m;A zELShgJ)G>7GtE`0;qRO4O&YK+7a&p)ATBh`Y4HDDr!)IR{XcOgGe>e4EKW)5|NWU{ zCpVd`sb_v<{x6B$^li(&)XATjI}CJ2Q$PC{Qa7;O=UcpF>Pr3VN%P+gVryelV?2Ev z<{VeRUAtU!x!v>POwV8hH;AkNCOWd^9UTo&R%)<6nIju*5BC7z=bj9%kt)qimDeLG zNh_W?xK*J#Ig(CcLCx@kE5K&~dYY^-PU7gLN^N}^=Vxh#OdLJ2r@9M(nOUKr1XwgW zGgwb51ug9KxFokvb$NiJnnp#`j&pNxA_N(Ra(`<#b8n!j2}%7Zy}1?wb@v}}mhJvT zc|#+=x}|gVR@P1T#}$>4$HuBAI&0OF9c`}gwuaSkqj8TX;r3~U6wbrj<3Ufkik{|k z_0&uzQ<4AV@}Sl}F=MS^l^OGfRn;qD`%vSmu*k7}B<%ydo*3heXK;N)`T&=WvlzL- z2Ce_QrZuTwy=%Tcuf&ZTK>%<#fFBCuq5!_FDA0!hXTj|cb4Vhm6I27**Z~lU2mfOJ zmpNzp6b|0aVPfhy8)tvNUOF?i|9x|3Ht)W_*s|Kpj(x=}%xUR=o;s)wg(}n5x!(UB 
z@78>&(o9=#ZT`F)$Ni(2w%(Y=yUQ|!;uJbTW~YD7v|00S#A4ReW!{mx#cX*)BMp}> zu>8s(c1#Ra4(r#bIh3HO%~9FG0cO+3j5BUQR$Bb*x_d0?&}t$C^}$ZXms2=H3_P7Z z?TCKR?r=m1x?>&HRXJ29Lkw!7wg5!xth7XFf*?**CaM7?p~f%L)vjhLl#HfhAJwZ} zgQMG4r=M?BJ}xH9JEPFV9D`Nyp2LO}*};#JeQ=V8+GML*9o^xk{v0(V4A>7yBW;dQ z=ZuQbV(aeMQ{IpjyOC_Jo`{sQXT1pKAS0<`s=hYnO*zj57`8}NRr56|52!l7+yGbg z(}j`|mDxLg5eQb5prWB@;q}#U%C|XCKJoI`tQPQc2QaH_&;$j5RN`=X18&70^o4AQ zA9q1X>vP$BPN&DqcCT8zf^`&GX6GC(otLM;%g@XE5EkxE9XZQ#p$5pD0t-LQGN*~T z=kvpbYJD@$N4CyBpC2Vu>+Q{ztyM=<@^??4%K3@X=@z*w>qnnTU6elMH2N~4G(V1d zbMrWroNY6(?G34Vr$thEHv{j!IhB8|r+dVD=8lQ#P6o=pzw)TZ`ofQcZV{du))s*Mg+T(=lg592g5}U8S?G-&tA3 zLH<^=JD)(Nfnirqb1hfiXxTY?rrI2}ovQt$e;_=h*tsxYB(+tIxr3h6FIsWX1^x-P zB+sb2t>|AN=1m_^6@*~klEB+nc{wRosdTa=Bu&gh-X+T4S3oSohoyl;@u(jRyH(xD|8Y}MuM@)xX+=^4#%I8CY6acSBTL> zPBq6uP}K3XOwD`DMM6(JJjJ6Rn~6rUb1t@sET~v^OVmg$>-*wD+_2OJC?PgAfH1^s z0i)c{Yj?4AVavm3`Rsv!-{FV-514}C0Io)`tp{Wnb?tyQ4BCLs6lQO2T(pX<=m%FJ z*wWOS{gx*UsSAcION^=LsO4Fc7%3mFhQybZyO+;E>RS_*mOL>UO$@;eEGu+Z2v&8w zSl+S>+%2C07Mih4h|Td@J7Pq6LS};DichuolxY_}0&5(ebi;Rxw1eeQXWKZpiJi}E zKQNq@o{UO~TIp8jfm)d62@%#y!)k4s2ddr9-oDuKa0w7@ZC*I`_yD#EbG(ccvktq< zCL?6X1seo1perum8d7L2`{e;k54-vfBuYj=9I%<;i3C5X$R)@y{Dn&_J*%_)TocnH zf#`4~XjMz57R+K#Y16prQp*mZfIG6PPV`neb?3(|SLdaE_({tr^4PvBEZaBnUuj#F90w1nw9^?VPg@H>TaHPIw*FvU3t89= z4p`%F27>Y$i>jRr>@t z1gCqJ30#2O9JjtFocLhC)}stEZ_wq8UQjC0F}YG;0B_NJ?w*jhZ-b)?ah`!ZOK{l z;|6cac8f)Vug1iHt3$P@Enb?x5=m_qnAzlw1&@pIKy_)Odac{NH@_IfuVq<&NuHOL zK4KhYedie0u-ILOHIf|ll*d9W@A3tW?4DVpMO1(P&8-%Acp;v{X#?h^%Z_ktzcZ+W z6_?ZIa|3J@aB40X3qdGx?KLb~o@%?zGNCKS(L;OcFJH3E7^JbrKwT6k?I}xlR_}tN zVIO%MIN3k!U2|?3lEfJuV!0;ZsuwWB6 zLVgmUg=OvadxBZivU$1!$_!{NxLQ0>5%urMQiC54S&D?Iq;pEOebO{g1Kp~@hAP)) z2?*3clbTYzfmdwhxze|8^g3Z|aR#t_4nGi`051qWp&$nZpkjgV=ko;-sLgh*TD&s# zw`VM`>nSEbYU~L^>WyQT4-BboFI#>oh)ShBp?N;eS?YnB#@-q}y7b=5o{ON;H71j9 zr0PIhQPm!S#h2eW2vtwTmfsuE>` z;|tGXi!WYe-WY(D+zyfk3oSovPHYtHUTht=O9u39DCh!gA-nZ_?BVm@wETM}x$Ztq zQ;3wCx1{{PwUpaEzAuhV^$Y{c5$-TG{F%Vo zmgR3W+Fb_#z=QNIJ(fx9=vZHIr$$oCme^P%*NFVfEHP9dTFDL+= 
z7FaMp--~QPVZaf?Pa^0H!2--dkKxw@{4qGbKzdXl=>VYFPHq#a7xVM~Xc8NThI`so z5r59+tDmVIZvV*57w()Iu29b!U3Y88+a}6X&j>O}#hV89I5ig#ehd}gWI&Z%7Jt8R zr9{;M_3{3~)$CX~tld<9gdM)ZxL%^VfN>kk14J>$tZ3l1+c_}ER`4R^fbqg{0syRG zAgU>V^MnVlY{N~NXM%Br;q(F(+|j=O)84m#$5CDBB3V_os!D2uZH$RQjX@SBepGdJ zb$0;<^lYi6*1Of}j)PrQU9D%g-p>FrnPgy+31i^KColR~43lKy%w+-w z9LP!_2?U$mB!DLg=4LJK8get-@0?R=wLQ6O<`1|(cv-4PtNPSA`|SPgZ-2Y`!a{1Z z^u00y-eV0>x1dAoupn%^z)PTbmCsc9fPAn;@{@-Jp|NZ?x$h&=?L`uab*N$w@6b-A z*9x8MgpsmhXXsnV@TJmj@>I3(%R|S0I+yAbhIut?km) z<>Wg*=Z&2=lDn=%=QwmIQ^@t3@tET~q|3;ehoEwg6IYP$QgyyZHwy;~iK`@^AVek? zmm1m3#DuOPU$_LyJU8@7M@3!Dbq3oDFaIA2$-?}k8i7U?02X%D1LM*rvUiL8w3tYM z6BH?EAJVH@db^&JjP?x zGv)H{L}z&17(~acvp3Df-mzdS=g5|9n&n!M{Qcch9r?$rG2LgAQ%!CY=fQ@WyBCo; zdE_zqxs~H=rnBSJ`?-!ByjEJ?u7eS4hl4su+y-JI11M$y`zUbBfD4H|v{?YD5!^)? z=w*1X1j8@WLEI}HlvWim5N%7fBCX{bYLepQi6*J{B3f&Qs(6}H;{g#Zc1M2&@|Sy& zh2Pzb*PL#au9s0ocmNgO)E}i)MV;g~&C*rm?!Dsqkd4f9sO@(7I>GKuj=uOi0+W%7ZErMY*N4Tj&s#N~VrrmymDwIw1pb#6GKqJ!!(6F9gjS`d#_-vZ4pKe17t(A$1`jg}0xZRX$ zoG-k0x8g4ojC69O#1~xk350aMZ>*DN?#`J~Z$#H_BR|_G?G&vxcPTQiJ|pwfSL998}K8O`=yIldYVmTq-V)>u4D)_vWC<0jWrh+?s28}%fw-AD1k9L zXNgRP+sDAfroB!|z7KQb#QwIap6PZ*?iGe6+uP9#gOZBa&h4Fpq4ZElc0qug8ijGp zxS_BL;vAe{8zmbV{HJA*4$NSC4hA6zVFVp8e)$H_IwrX*P(8Nh8w$V8OOGy4?$?i5uw0`H(vHHH7o@KVHMCxUs?F01W8Slb zSGLe&SXw0CT)myBw@BLy2XB@#YlWDW9!l`n3VX6^)uIXQ5!&ws?hM#e^vs7JMc2nu zd!(m?oSW9hSndht?R3RVzcxy1kq(fep%a?)o+g`3LAC>L{H*i?L9Nd%H_-^>06Fxb zbX*vXMi(6ho8cOXeD{#@q7Y7J>cdnkt<(l4Q^^c;mw+uZbNcf;q!U7Wy$jcjrKnyi zliwDCT2lrrC@!mMlnQg_@eY}&q;$h+Dr~NuDO6*a}Rb*^^fIS3rqJ({~P`<`HGEV-}{r!Tvn9Jo;0B(l@vMLJBS zK+l89gPNVfZTCwrlx??kw0+Rg?g8JaKqw-X9vA+80fa2mJZpB3h0^+R8k>ei6g z6?scFN+ZW^M&;zvPJjCjKl#~%(#`8pw0LDZFQTOO*H3b+A5B zJU$Ba8qi2&0-7f%-=y#dAP(q$%%L_OW%jHZmr2!p61B~4W^S_BZ3=bRBTz)5<(-h2 z@cb3pr-%N1DDF%SL&9Z*3Y{?FV7l}u#^+A6j#w7Igexu0Kz9H);aU0Jl*&Ro2GJwl zxsi`hh6{@5A%ZjmX$>|=D})(x{lCh8RXF)|={iwJ^hUb+=o(3q)>jlC_n^aCixcD^ zum5%B9ig!?9fjGVoT@`V!R);E4Mh3_2^8HItdq*NGSSITHwQF=Up_{Dc?!*59(@5} 
z?&6cu24Q9}>?FvcC#B29@%E{qiFR`Rer1~w&O1rOp`+pzLZj0*(@rXnNY8l}Q*t;h zydi;aN`aNTSdx_ARn|sHn-our@q2-%?InATO3QVI;iBUcKpCtQv<(TOnF*YXAPi{+ zBudb8!$8FXVAA&r7Z!efRQi2c;nY*o->wrA&fsX%6h+Mk=s^5PnD0#02YK}I-JdF# zli%)=pAotToLH|%a{HOGlXSnLctJ<~Kzua9>dSJ#KDL?c^U?@tX3A zkTTkrFpxV0Z?steVsr$i7C0(&ihk(9R~3Q0SRwrnQMLO=m%WF&>kZ{OVY#~roK%hr&`KSoNK>adaNshhlnU~#lgjsnOs*BRKM3{M z!QJG@p9|jvQ!c9w(I%=ucpYI;2Rm(%g;&9&W#?k#{GXR2roeMXERfmI zBq4)1A3h~;Vg$OGfD_1|k3t5F8r?&W*8W5i%F3_oBYU2bE-BRhRJyC||7CPqqq9_U zDffwJXD6%QX6t>1^3bwiY`Oj@UBqlAw^#p}z1sh}lo!Q(XTlz5tqa&X^$b=|Sc`}FVxZyRGxvWB|m!#2oif4$Y>Qh=e#nmn2 zi%auuN%9YGV>PjxyyzI+49G9ukpdO;?!%a?K(g3<6cVMd7Q2yVKNziJ1Nq%)=?`_l z>%$hDRCJlJBFGhIAt0F1X^0O}a0s<6fe6ya$i&dbirar9-CIr`yI&T|`~2kRzms+x z4l0{B4AwDeg}xj2?VFvL#Qr#V4SCa1Hj?bSQi$C5uJmzIc1HSa;m$MCFU!eAKa@5W zZvVaXjwnVJ=Y~yIEj|F4_6HwG&x?i|>>6fO>D(BKbD7at4v;3bjxcXO6k4X-ktI$O z#Ex)U7)(d!IB8(j{0Z}kspKSMF7dsES-ZrP-3-F*gVk_>&$4zx{ggi1!1#ddoe4wr zjoC)#h?H6ibs8@Pjz9Ob35?y9^aZXbzE|Z7$fJUc0_ghju8t0F8M?+sdh-3`@r%nh zl6|6FEjo*{<9eQ_0%yDWG_VoSO>`4^M3j%L-1)#Q^2dm{PSyl=JB2#vyJNR9v&l*V zMDO;M-RFF=p%S+4flB!rxNbVSwS;Uvg8D+*;Ym{i=$r{~-KEtM=q?qe3~y4uA8&Mv zB)?Hcun&!rgHI`I3loa`>^dP4Y3oJYe;Xw|dV9?aVs11vs`aeC2r=5#n@V{HRyXj> z{4~ISwERexypHx>!v(v2laeHCGn#9OL#P=<|B}r}?2Oq9y0>Mqjf`$YLSJB^!{!db ziUV*=G|idiK$Ewo%$Biyy!f(Aa^h?7Nk8S6KPo1=mqA3L=n!(R-P1F3sEa&Vy@{Oh z%SQxulA>Q;9LFvw@q^RbqM?W>AG?KncK5U7#;0;x*y#NtlcMSIeK3DuT)OZc^4;4V8dAk5yY%b6?2A!q~2xOtP2%44|B6>2Z+1WkTAE0`$Dsg%~K@~67z-FT-?!( zQEZ!S*K!88={LnNigT^%au@mAZj2&3NB0o!Ze-~xj)@{7tF6P3(T<`auytJYyhA8v z!<|4WU^b*?VX*C>7DPIlt!Q$pSAK{5@HbMp;Om!9t|NavqM5O&XufD<5-`z`Y&nPL1Lfd5hu$ zAsJbM?D@*?p1Vt}xY#;2(Ax4J%!HjVio{KQIkKH9a?iC8FnK#xHrfx-aQ!LiN; z3}jbPxTm2Lw&jwkw}s7xcNXLrw34R`Eqcz%6{i!+4K3}(2B5d?To=8n z0FR;#2^c$%6PUcM2j$Hb;-Wd57&NVCp!^U0=jA17h-r<1(STxhZf`|6>Rs;YrP1(7whL!q#Su9zSiAgZzhp$u}caILYEIZ!!9xg+1nesi1L zQ6V-DT4SU2-p0^9lkS6LwdZVku6=xX*yvhynZ#IsceEa;+HG<|WakDs+rvBl7k?#x ztS*cGj9H|ofY~|dJdI*!nm5Rz6iyG@Kn_ASGT9N>e7XxQtI0?r$;~C($LYcsGV?tEzYsrW 
znWe4B4bLo*$15ez&KDz3!EgrB3ycKH7j55iq!Wa4>WmbPtr!hPM`nA9lguF3Jto(S zEn^8CmCrQF7lE{<=Q?;Qz&p-F4!q-U9+$hxsjJqk1x36v@m9uMwWGp#^y_R}9 zES3)>gEKBA(0N-|T%W}L!jdJ_dlkod{8H*-Jf#Ei2fo zyBITQC~DsNS~13UsK5cR=&2pEJbR5;@eZW z>c?`rjBQs1xZB$i&DXc5Va#c7NoMh1`2gPHTxV!7=T#RL-G_;`(ax#DWv?noR%NZ( z2_&$Q$)$Il7DD+RBzkB?(yF^EMn@K`s26iEf3+i@PBt(?VM#b)pffkV1FLj6%i|JS zCe@*7{-JD1&vZ>KAkX_1{L$jRBMh`I=0K?_Zbr;sV3EXwzd|IDa@s~SUA%WnXdpkn znBV{kJIr=x>2kW(Sz0{fCIT3b;>=}>omqv&x$Xpnm}nqC2U^re$2#!&D_EG1du5UN zM&|tyZ7xpO4;&NYulN}+n zAH$|4ClyVgRpVK`n#r-}w-bF3f3%Od=PMfv_JqI zD6NlF45vSL#t3y+&ywH@_pxWJiT^0~xEp=6rvJ(oG(FNvb5* z{Z86I4#pHeQADMkq(sFeFAm66%WT14WI0UNb%KIT}34!DXnnIaSVY&CN~#vmSxSClGBj}f3^DXiE=90aP#iY%> zuF=CkpG~}Es;?Wa|B3R_fNFlyoLHn4K`Dw9cy_h$5#^)QGhXUyNQaaUxc5@bHR|$P;Lo9dEN%J>!3XeWvbqi?4e~Q^m=KXVVU@i6 zzHc*m;!@=s2{zP*Zo=v zk%#?CC;5Y4>8iNmihX&~dzq3Vk6fl)LmqisvB`C{@+LC04O8%|+mtj}x>b3OynH!c z8D7-o9nOCLF zg=;lMD=+IK@BUP}l$_QTMWAULKSjpeKY@(#=pb#FI--+$Xs_K}*9()V#R58z74BS=44er*?LYeyS9=>n&=f%Rfr(dV7v zlDk}dnYC+xd@(NKQtuF!rT{Zc%n&4b{Z|t0*T{z7A6M3$PjgYQ9kbIDhLx8X6i9|%lB-G0r<5&)A4Zgc zii%XrQiQzR1~HozFXsiJKN}y4os%jO8)uWlo!+u%&L;~G+a}Yd>iI%!ab{d2kuJK< z9_l|eF`OTws8T5o8~yLq8$^A!IoalvNEn*dWTL-?llhF6Wk?n>T3dK|F@aK?@g~eB zb`NzA*cL&4+N=Zwb{qA+C`q5+$2jStcjGE__g0H?M`>7?L4uWLk)YEC9~4VHqndE2 zOc&ld#KN+A7>qgaeXqx+{oAM#W}EgXnS2(bd|yoY@s>g8#_rp969r=Tk9t2fLne2$ z@R#jMc%9hVG~%G>VP%v3QN>(zCPiNUg*3?Fm1MqszO5KbpB9H3$8wFO+b`R?bGbxk zKR1GG>zlT_8r{f4=&))i(5j8^X3-jEkqVGuzyWL|^FLV?2rSRf- zM~Q#Wg7ND8u%2dHLoC+7o&1r3|8MdN-eE?lSzk)Z&2T z!BiSqNXCJyAF@;wLg-+x2OUi_J$`E1QYsZ;V5q-0&TT*Q<(E2oM&ZVwK8BA|W;wVx zSUm3|*FUWMONBmW#Ci0x8v6{4hJi!C)$`!bi4GHGXFW$t;ZLD+GBF-60Z!QD9k53iF<{Htczxa}5w{vPf?+2|nI4}%B?W_oG~n5nKhk$?)}Edx%Z>EK5b-NGT)7<9uR`)6zmEx<7W zCV zEm(y{i$hzn*wSZ{gWrddV2)Bp&vYCjuq%#|*c-Fvmgf2LXEUG9Ur|j)gUjA~W~15U zjxCOy6AFnV4e{Kji7cOD?S?m|6p;S7105ts zCg)IU^{TR;G7XLi(eAdCbMDL6B8j$KaYylRrGe(grX*eXY!49Axv@NS6&b9=PR($f z#yfsXCD_n_A1Vt^d{23+ybg(N&DBFtma`yF|VDxXQ64m~`RLUD=|Gz2g$R97LdO^^`xxN(lmDm4S)#c=l=Bj7J!Lh(p 
z+yl5*F7izD?730{oEu0M1ku8%jih1+THUjTIAVlToX2IY91x6WfM&k}W58oPJ~Icmg;t zH2tyq*@5k@X==fM6+|4YYgkZLz(J`380XL&200In_E5zXAP4@s>Y^Gw1Rk(%n5<)F zR>h8y)$E|1L5Dlw?k;)s4qu?o48t!3TiDbB5!cRUD2_{ceSwIrTPVn|p~4215hF+W zftCg!l?adtT61bRkOdRTf?A|wxkkis9P+@wS8uHaz#51cI<%2PnwwQMO~>=X?R1Qg z5z$o0N;)BxX>R!;xQW?8Jh*2ih9fhL{UM!Z2hD>bGw2hDnRI%5$wKfwl0O@3r zgK&dp(4s9yvBp%|Wln`Gh-P9eHFE1j*=9eORe>zrXFNC36I4fx{zcbxLipAw^&ve6 zd^>9~AmGU0WB_{zpl^(D*u;exE?ZY2IAR9SyYtR^-)6s#r!r7LlC=#~9Yq32yJc;x z;ta$_Gmd7FV?Dkdpsnjjj~I9fP+>OotcigJ#A%06$b-i~9a_fBdX#Rz4M9g2qOx|@ zLCd@>uy%*GUQ|O7I09I2q)j~~S%s*Cz<6Cqy!?-<4aTvA;N=@u%$=-_2K%rL)lF0( z0)No80II4S8(6&_v5|cP>kXBGUoYS64{B7*4Z77q-2pgZn5aI%02#W5c+z$(QuF7& zJ$1nPv$WVu4*>VZ&I7J0)VagR97C1~Lf5Dqr^QDMzVlm<8Pr0mZbSUqLMKZz0-J?y z=uqgzkcLe@j*g2N4>?#QJZRw>ID{bapm#Y|ONK_VHcGCbO3cVnH35{hWl^$DPX{4K z2OdI1N7*{&I(?hu{PTTV>ud}yNL6?V*$A>H;0j?L;11CFk({-`QFOyt$JAnOxg?Uw zLM7PLkZ=UV9l;sQHmG+4@(q~e&Su}%?T|ymi-T?;#!|)n!V5%=EEX=@L(q9J-Kk5? zvL1d^g+W`{9GG#0kedr*qo(bu;g_^^VhJuYOb%O#>d_)aZgjE~*@dq>b+YqwDoTt_f z%sb34kXC`11n^C;Ui4a8Zsk~xX+*GM9Gt)?;Z0d!n?b-9Pp2WoXKJoB89R)@vhmFw zRJB4GH3JDmKrOH*`0FNHE(N9E*V9vY|3RO>yzuvb