diff --git a/README.md b/README.md
index bc36f07..5d87a80 100644
--- a/README.md
+++ b/README.md
@@ -269,3 +269,28 @@ npm version minor # or major/patch
 npm publish
 git push --follow-tags
 ```
+
+
+## Browser Compatibility
+
+As of version 3.13.1, rss-parser is fully compatible with browser environments, including SvelteKit and other modern web frameworks. The library automatically detects the environment and uses the appropriate parser implementation.
+
+When using rss-parser in the browser, use a bundler such as webpack, Rollup, or Vite to include the library in your project.
+
+Example usage in a browser environment:
+
+```javascript
+import RSSParser from 'rss-parser';
+
+const parser = new RSSParser();
+
+(async () => {
+  const feed = await parser.parseURL('https://example.com/rss-feed.xml');
+  console.log(feed.title);
+  feed.items.forEach(item => {
+    console.log(item.title + ':' + item.link);
+  });
+})();
+```
+
+Note: When fetching RSS feeds from other domains in the browser, be aware of potential CORS issues. You may need to use a proxy server or a feed that sends CORS headers to work around this limitation.
diff --git a/dist/browser-bundle.js b/dist/browser-bundle.js
new file mode 100644
index 0000000..3a898fc
--- /dev/null
+++ b/dist/browser-bundle.js
@@ -0,0 +1,886 @@
+/*
+ * ATTENTION: The "eval" devtool has been used (maybe by default in mode: "development").
+ * This devtool is neither made for production nor for readable output files.
+ * It uses "eval()" calls to create a separate source file in the browser devtools.
+ * If you are trying to read the output file, select a different devtool (https://webpack.js.org/configuration/devtool/)
+ * or disable the default devtool with "devtool: false".
+ * If you are looking for production-ready output files, see mode: "production" (https://webpack.js.org/configuration/mode/).
+ */
+(function webpackUniversalModuleDefinition(root, factory) {
+	if(typeof exports === 'object' && typeof module === 'object')
+		module.exports = factory();
+	else if(typeof define === 'function' && define.amd)
+		define([], factory);
+	else if(typeof exports === 'object')
+		exports["RSSParser"] = factory();
+	else
+		root["RSSParser"] = factory();
+})(this, () => {
+return /******/ (() => { // webpackBootstrap
/******/ var __webpack_modules__ = ({

/***/ "./lib/browser-parser.js":
/*!*******************************!*\
  !*** ./lib/browser-parser.js ***!
  \*******************************/
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {

"use strict";
eval("\n\nfunction _typeof(o) { \"@babel/helpers - typeof\"; return _typeof = \"function\" == typeof Symbol && \"symbol\" == typeof Symbol.iterator ? function (o) { return typeof o; } : function (o) { return o && \"function\" == typeof Symbol && o.constructor === Symbol && o !== Symbol.prototype ? 
\"symbol\" : typeof o; }, _typeof(o); }\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, _toPropertyKey(descriptor.key), descriptor); } }\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); Object.defineProperty(Constructor, \"prototype\", { writable: false }); return Constructor; }\nfunction _toPropertyKey(t) { var i = _toPrimitive(t, \"string\"); return \"symbol\" == _typeof(i) ? i : String(i); }\nfunction _toPrimitive(t, r) { if (\"object\" != _typeof(t) || !t) return t; var e = t[Symbol.toPrimitive]; if (void 0 !== e) { var i = e.call(t, r || \"default\"); if (\"object\" != _typeof(i)) return i; throw new TypeError(\"@@toPrimitive must return a primitive value.\"); } return (\"string\" === r ? String : Number)(t); }\nvar xml2js = __webpack_require__(/*! xml2js */ \"./node_modules/xml2js/lib/xml2js.js\");\nvar utils = __webpack_require__(/*! ./utils */ \"./lib/utils.js\");\nvar fields = __webpack_require__(/*! ./fields */ \"./lib/fields.js\");\nvar DEFAULT_HEADERS = {\n 'User-Agent': 'rss-parser',\n 'Accept': 'application/rss+xml'\n};\nvar DEFAULT_MAX_REDIRECTS = 5;\nvar DEFAULT_TIMEOUT = 60000;\nvar Parser = /*#__PURE__*/function () {\n function Parser() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n _classCallCheck(this, Parser);\n options.headers = options.headers || {};\n options.xml2js = options.xml2js || {};\n options.customFields = options.customFields || {};\n options.customFields.item = options.customFields.item || [];\n options.customFields.feed = options.customFields.feed || [];\n options.requestOptions = options.requestOptions || {};\n if (!options.maxRedirects) options.maxRedirects = DEFAULT_MAX_REDIRECTS;\n if (!options.timeout) options.timeout = DEFAULT_TIMEOUT;\n this.options = options;\n this.xmlParser = new xml2js.Parser(this.options.xml2js);\n }\n _createClass(Parser, [{\n key: \"parseString\",\n value: function parseString(xml, callback) {\n var _this = this;\n var prom = new Promise(function (resolve, reject) {\n _this.xmlParser.parseString(xml, function (err, result) {\n if (err) return reject(err);\n if (!result) {\n return reject(new Error('Unable to parse XML.'));\n }\n var feed = null;\n if (result.feed) {\n feed = _this.buildAtomFeed(result);\n } else if (result.rss && result.rss.$ && result.rss.$.version && result.rss.$.version.match(/^2/)) {\n feed = _this.buildRSS2(result);\n } else if (result['rdf:RDF']) {\n feed = _this.buildRSS1(result);\n } else if (result.rss && result.rss.$ && result.rss.$.version && result.rss.$.version.match(/0\\.9/)) {\n feed = _this.buildRSS0_9(result);\n } else if (result.rss && _this.options.defaultRSS) {\n switch (_this.options.defaultRSS) {\n case 0.9:\n feed = _this.buildRSS0_9(result);\n break;\n case 1:\n feed = _this.buildRSS1(result);\n break;\n case 2:\n feed = _this.buildRSS2(result);\n break;\n default:\n return reject(new Error(\"default RSS version not recognized.\"));\n }\n } else {\n return reject(new Error(\"Feed not recognized as RSS 1 or 2.\"));\n }\n resolve(feed);\n });\n });\n prom = utils.maybePromisify(callback, prom);\n return prom;\n }\n }, {\n key: \"parseURL\",\n value: function parseURL(feedUrl, callback) {\n var _this2 = this;\n var redirectCount = arguments.length > 2 && arguments[2] !== undefined ? 
arguments[2] : 0;\n var prom = new Promise(function (resolve, reject) {\n fetch(feedUrl, {\n headers: Object.assign({}, DEFAULT_HEADERS, _this2.options.headers),\n redirect: 'follow'\n }).then(function (response) {\n if (!response.ok) {\n throw new Error(\"HTTP Error Response: \".concat(response.status, \" \").concat(response.statusText));\n }\n return response.text();\n }).then(function (xml) {\n return _this2.parseString(xml);\n }).then(resolve)[\"catch\"](reject);\n });\n prom = utils.maybePromisify(callback, prom);\n return prom;\n }\n }, {\n key: \"buildRSS0_9\",\n value: function buildRSS0_9(xmlObj) {\n var channel = xmlObj.rss.channel[0];\n var items = channel.item || [];\n return this.buildRSS(channel, items);\n }\n }, {\n key: \"buildRSS1\",\n value: function buildRSS1(xmlObj) {\n var channel = xmlObj['rdf:RDF'].channel[0];\n var items = xmlObj['rdf:RDF'].item || [];\n return this.buildRSS(channel, items);\n }\n }, {\n key: \"buildRSS2\",\n value: function buildRSS2(xmlObj) {\n var channel = xmlObj.rss.channel[0];\n var items = channel.item || [];\n var feed = this.buildRSS(channel, items);\n if (xmlObj.rss.$ && xmlObj.rss.$['xmlns:itunes']) {\n this.decorateItunes(feed, channel);\n }\n return feed;\n }\n }, {\n key: \"buildAtomFeed\",\n value: function buildAtomFeed(xmlObj) {\n var _this3 = this;\n var feed = {\n items: []\n };\n var channel = xmlObj.feed;\n if (channel.link) {\n feed.link = channel.link[0].$.href;\n feed.feedUrl = channel.link[0].$.href;\n }\n if (channel.title) {\n var title = channel.title[0] || '';\n if (title._) title = title._;\n if (title) feed.title = title;\n }\n if (channel.updated) {\n feed.lastBuildDate = channel.updated[0];\n }\n feed.items = (channel.entry || []).map(function (entry) {\n return _this3.parseItemAtom(entry);\n });\n return feed;\n }\n }, {\n key: \"buildRSS\",\n value: function buildRSS(channel, items) {\n var _this4 = this;\n var feed = {\n items: []\n };\n if (channel.title) {\n var title = channel.title[0];\n if (title._) title = title._;\n if (title) feed.title = title;\n }\n if (channel.link) {\n var link = channel.link[0];\n if (link._) link = link._;\n if (link) feed.link = link;\n }\n if (channel.description) {\n var description = channel.description[0];\n if (description._) description = description._;\n if (description) feed.description = description;\n }\n if (channel.lastBuildDate) {\n feed.lastBuildDate = channel.lastBuildDate[0];\n }\n if (channel.pubDate) {\n feed.pubDate = channel.pubDate[0];\n }\n feed.items = items.map(function (xmlItem) {\n return _this4.parseItemRSS(xmlItem);\n });\n return feed;\n }\n }, {\n key: \"parseItemAtom\",\n value: function parseItemAtom(entry) {\n var item = {};\n if (entry.title) {\n var title = entry.title[0] || '';\n if (title._) title = title._;\n if (title) item.title = title;\n }\n if (entry.link && entry.link.length) {\n item.link = entry.link[0].$.href;\n }\n if (entry.published && entry.published.length && entry.published[0]) {\n item.pubDate = new Date(entry.published[0]).toISOString();\n }\n if (entry.content && entry.content.length) {\n item.content = entry.content[0]._ || entry.content[0];\n item.contentSnippet = utils.getSnippet(item.content);\n }\n if (entry.summary && entry.summary.length) {\n item.summary = entry.summary[0];\n }\n if (entry.id) {\n item.id = entry.id[0];\n }\n this.setISODate(item);\n return item;\n }\n }, {\n key: \"parseItemRSS\",\n value: function parseItemRSS(xmlItem) {\n var item = {};\n if (xmlItem.title) {\n var title = xmlItem.title[0];\n if 
(title._) title = title._;\n if (title) item.title = title;\n }\n if (xmlItem.link) {\n var link = xmlItem.link[0];\n if (link._) link = link._;\n if (link) item.link = link;\n }\n if (xmlItem.description) {\n var description = xmlItem.description[0];\n if (description._) description = description._;\n if (description) {\n item.content = description;\n item.contentSnippet = utils.getSnippet(description);\n }\n }\n if (xmlItem.pubDate) {\n item.pubDate = xmlItem.pubDate[0];\n }\n if (xmlItem['dc:creator']) {\n item.creator = xmlItem['dc:creator'][0];\n }\n if (xmlItem.category) {\n item.categories = xmlItem.category;\n }\n this.setISODate(item);\n return item;\n }\n }, {\n key: \"setISODate\",\n value: function setISODate(item) {\n var date = item.pubDate || item.date;\n if (date) {\n try {\n item.isoDate = new Date(date.trim()).toISOString();\n } catch (e) {\n // Ignore bad date format\n }\n }\n }\n }]);\n return Parser;\n}();\nmodule.exports = Parser;\n\n//# sourceURL=webpack://RSSParser/./lib/browser-parser.js?"); + +/***/ }), + +/***/ "./lib/fields.js": +/*!***********************!*\ + !*** ./lib/fields.js ***! + \***********************/ +/***/ ((module) => { + +eval("var fields = module.exports = {};\nfields.feed = [['author', 'creator'], ['dc:publisher', 'publisher'], ['dc:creator', 'creator'], ['dc:source', 'source'], ['dc:title', 'title'], ['dc:type', 'type'], 'title', 'description', 'author', 'pubDate', 'webMaster', 'managingEditor', 'generator', 'link', 'language', 'copyright', 'lastBuildDate', 'docs', 'generator', 'ttl', 'rating', 'skipHours', 'skipDays'];\nfields.item = [['author', 'creator'], ['dc:creator', 'creator'], ['dc:date', 'date'], ['dc:language', 'language'], ['dc:rights', 'rights'], ['dc:source', 'source'], ['dc:title', 'title'], 'title', 'link', 'pubDate', 'author', 'summary', ['content:encoded', 'content:encoded', {\n includeSnippet: true\n}], 'enclosure', 'dc:creator', 'dc:date', 'comments'];\nvar mapItunesField = function mapItunesField(f) {\n return ['itunes:' + f, f];\n};\nfields.podcastFeed = ['author', 'subtitle', 'summary', 'explicit'].map(mapItunesField);\nfields.podcastItem = ['author', 'subtitle', 'summary', 'explicit', 'duration', 'image', 'episode', 'image', 'season', 'keywords', 'episodeType'].map(mapItunesField);\n\n//# sourceURL=webpack://RSSParser/./lib/fields.js?"); + +/***/ }), + +/***/ "./lib/utils.js": +/*!**********************!*\ + !*** ./lib/utils.js ***! + \**********************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("function _typeof(o) { \"@babel/helpers - typeof\"; return _typeof = \"function\" == typeof Symbol && \"symbol\" == typeof Symbol.iterator ? function (o) { return typeof o; } : function (o) { return o && \"function\" == typeof Symbol && o.constructor === Symbol && o !== Symbol.prototype ? \"symbol\" : typeof o; }, _typeof(o); }\nvar utils = module.exports = {};\nvar entities = __webpack_require__(/*! entities */ \"./node_modules/entities/lib/index.js\");\nvar xml2js = __webpack_require__(/*! 
xml2js */ \"./node_modules/xml2js/lib/xml2js.js\");\nutils.stripHtml = function (str) {\n str = str.replace(/([^\\n])<\\/?(h|br|p|ul|ol|li|blockquote|section|table|tr|div)(?:.|\\n)*?>([^\\n])/gm, '$1\\n$3');\n str = str.replace(/<(?:.|\\n)*?>/gm, '');\n return str;\n};\nutils.getSnippet = function (str) {\n return entities.decodeHTML(utils.stripHtml(str)).trim();\n};\nutils.getLink = function (links, rel, fallbackIdx) {\n if (!links) return;\n for (var i = 0; i < links.length; ++i) {\n if (links[i].$.rel === rel) return links[i].$.href;\n }\n if (links[fallbackIdx]) return links[fallbackIdx].$.href;\n};\nutils.getContent = function (content) {\n if (typeof content._ === 'string') {\n return content._;\n } else if (_typeof(content) === 'object') {\n var builder = new xml2js.Builder({\n headless: true,\n explicitRoot: true,\n rootName: 'div',\n renderOpts: {\n pretty: false\n }\n });\n return builder.buildObject(content);\n } else {\n return content;\n }\n};\nutils.copyFromXML = function (xml, dest, fields) {\n fields.forEach(function (f) {\n var from = f;\n var to = f;\n var options = {};\n if (Array.isArray(f)) {\n from = f[0];\n to = f[1];\n if (f.length > 2) {\n options = f[2];\n }\n }\n var _options = options,\n keepArray = _options.keepArray,\n includeSnippet = _options.includeSnippet;\n if (xml[from] !== undefined) {\n dest[to] = keepArray ? xml[from] : xml[from][0];\n }\n if (dest[to] && typeof dest[to]._ === 'string') {\n dest[to] = dest[to]._;\n }\n if (includeSnippet && dest[to] && typeof dest[to] === 'string') {\n dest[to + 'Snippet'] = utils.getSnippet(dest[to]);\n }\n });\n};\nutils.maybePromisify = function (callback, promise) {\n if (!callback) return promise;\n return promise.then(function (data) {\n return setTimeout(function () {\n return callback(null, data);\n });\n }, function (err) {\n return setTimeout(function () {\n return callback(err);\n });\n });\n};\nvar DEFAULT_ENCODING = 'utf8';\nvar ENCODING_REGEX = /(encoding|charset)\\s*=\\s*(\\S+)/;\nvar SUPPORTED_ENCODINGS = ['ascii', 'utf8', 'utf16le', 'ucs2', 'base64', 'latin1', 'binary', 'hex'];\nvar ENCODING_ALIASES = {\n 'utf-8': 'utf8',\n 'iso-8859-1': 'latin1'\n};\nutils.getEncodingFromContentType = function (contentType) {\n contentType = contentType || '';\n var match = contentType.match(ENCODING_REGEX);\n var encoding = (match || [])[2] || '';\n encoding = encoding.toLowerCase();\n encoding = ENCODING_ALIASES[encoding] || encoding;\n if (!encoding || SUPPORTED_ENCODINGS.indexOf(encoding) === -1) {\n encoding = DEFAULT_ENCODING;\n }\n return encoding;\n};\n\n//# sourceURL=webpack://RSSParser/./lib/utils.js?"); + +/***/ }), + +/***/ "./node_modules/base64-js/index.js": +/*!*****************************************!*\ + !*** ./node_modules/base64-js/index.js ***! + \*****************************************/ +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; +eval("\n\nexports.byteLength = byteLength\nexports.toByteArray = toByteArray\nexports.fromByteArray = fromByteArray\n\nvar lookup = []\nvar revLookup = []\nvar Arr = typeof Uint8Array !== 'undefined' ? 
Uint8Array : Array\n\nvar code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\nfor (var i = 0, len = code.length; i < len; ++i) {\n lookup[i] = code[i]\n revLookup[code.charCodeAt(i)] = i\n}\n\n// Support decoding URL-safe base64 strings, as Node.js does.\n// See: https://en.wikipedia.org/wiki/Base64#URL_applications\nrevLookup['-'.charCodeAt(0)] = 62\nrevLookup['_'.charCodeAt(0)] = 63\n\nfunction getLens (b64) {\n var len = b64.length\n\n if (len % 4 > 0) {\n throw new Error('Invalid string. Length must be a multiple of 4')\n }\n\n // Trim off extra bytes after placeholder bytes are found\n // See: https://github.com/beatgammit/base64-js/issues/42\n var validLen = b64.indexOf('=')\n if (validLen === -1) validLen = len\n\n var placeHoldersLen = validLen === len\n ? 0\n : 4 - (validLen % 4)\n\n return [validLen, placeHoldersLen]\n}\n\n// base64 is 4/3 + up to two characters of the original data\nfunction byteLength (b64) {\n var lens = getLens(b64)\n var validLen = lens[0]\n var placeHoldersLen = lens[1]\n return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen\n}\n\nfunction _byteLength (b64, validLen, placeHoldersLen) {\n return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen\n}\n\nfunction toByteArray (b64) {\n var tmp\n var lens = getLens(b64)\n var validLen = lens[0]\n var placeHoldersLen = lens[1]\n\n var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen))\n\n var curByte = 0\n\n // if there are placeholders, only get up to the last complete 4 chars\n var len = placeHoldersLen > 0\n ? validLen - 4\n : validLen\n\n var i\n for (i = 0; i < len; i += 4) {\n tmp =\n (revLookup[b64.charCodeAt(i)] << 18) |\n (revLookup[b64.charCodeAt(i + 1)] << 12) |\n (revLookup[b64.charCodeAt(i + 2)] << 6) |\n revLookup[b64.charCodeAt(i + 3)]\n arr[curByte++] = (tmp >> 16) & 0xFF\n arr[curByte++] = (tmp >> 8) & 0xFF\n arr[curByte++] = tmp & 0xFF\n }\n\n if (placeHoldersLen === 2) {\n tmp =\n (revLookup[b64.charCodeAt(i)] << 2) |\n (revLookup[b64.charCodeAt(i + 1)] >> 4)\n arr[curByte++] = tmp & 0xFF\n }\n\n if (placeHoldersLen === 1) {\n tmp =\n (revLookup[b64.charCodeAt(i)] << 10) |\n (revLookup[b64.charCodeAt(i + 1)] << 4) |\n (revLookup[b64.charCodeAt(i + 2)] >> 2)\n arr[curByte++] = (tmp >> 8) & 0xFF\n arr[curByte++] = tmp & 0xFF\n }\n\n return arr\n}\n\nfunction tripletToBase64 (num) {\n return lookup[num >> 18 & 0x3F] +\n lookup[num >> 12 & 0x3F] +\n lookup[num >> 6 & 0x3F] +\n lookup[num & 0x3F]\n}\n\nfunction encodeChunk (uint8, start, end) {\n var tmp\n var output = []\n for (var i = start; i < end; i += 3) {\n tmp =\n ((uint8[i] << 16) & 0xFF0000) +\n ((uint8[i + 1] << 8) & 0xFF00) +\n (uint8[i + 2] & 0xFF)\n output.push(tripletToBase64(tmp))\n }\n return output.join('')\n}\n\nfunction fromByteArray (uint8) {\n var tmp\n var len = uint8.length\n var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes\n var parts = []\n var maxChunkLength = 16383 // must be multiple of 3\n\n // go through the array every three bytes, we'll deal with trailing stuff later\n for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {\n parts.push(encodeChunk(uint8, i, (i + maxChunkLength) > len2 ? 
len2 : (i + maxChunkLength)))\n }\n\n // pad the end with zeros, but make sure to not forget the extra bytes\n if (extraBytes === 1) {\n tmp = uint8[len - 1]\n parts.push(\n lookup[tmp >> 2] +\n lookup[(tmp << 4) & 0x3F] +\n '=='\n )\n } else if (extraBytes === 2) {\n tmp = (uint8[len - 2] << 8) + uint8[len - 1]\n parts.push(\n lookup[tmp >> 10] +\n lookup[(tmp >> 4) & 0x3F] +\n lookup[(tmp << 2) & 0x3F] +\n '='\n )\n }\n\n return parts.join('')\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/base64-js/index.js?"); + +/***/ }), + +/***/ "./node_modules/buffer/index.js": +/*!**************************************!*\ + !*** ./node_modules/buffer/index.js ***! + \**************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +"use strict"; +eval("/*!\n * The buffer module from node.js, for the browser.\n *\n * @author Feross Aboukhadijeh \n * @license MIT\n */\n/* eslint-disable no-proto */\n\n\n\nconst base64 = __webpack_require__(/*! base64-js */ \"./node_modules/base64-js/index.js\")\nconst ieee754 = __webpack_require__(/*! ieee754 */ \"./node_modules/ieee754/index.js\")\nconst customInspectSymbol =\n (typeof Symbol === 'function' && typeof Symbol['for'] === 'function') // eslint-disable-line dot-notation\n ? Symbol['for']('nodejs.util.inspect.custom') // eslint-disable-line dot-notation\n : null\n\nexports.Buffer = Buffer\nexports.SlowBuffer = SlowBuffer\nexports.INSPECT_MAX_BYTES = 50\n\nconst K_MAX_LENGTH = 0x7fffffff\nexports.kMaxLength = K_MAX_LENGTH\n\n/**\n * If `Buffer.TYPED_ARRAY_SUPPORT`:\n * === true Use Uint8Array implementation (fastest)\n * === false Print warning and recommend using `buffer` v4.x which has an Object\n * implementation (most compatible, even IE6)\n *\n * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,\n * Opera 11.6+, iOS 4.2+.\n *\n * We report that the browser does not support typed arrays if the are not subclassable\n * using __proto__. Firefox 4-29 lacks support for adding new properties to `Uint8Array`\n * (See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438). IE 10 lacks support\n * for __proto__ and has a buggy typed array implementation.\n */\nBuffer.TYPED_ARRAY_SUPPORT = typedArraySupport()\n\nif (!Buffer.TYPED_ARRAY_SUPPORT && typeof console !== 'undefined' &&\n typeof console.error === 'function') {\n console.error(\n 'This browser lacks typed array (Uint8Array) support which is required by ' +\n '`buffer` v5.x. 
Use `buffer` v4.x if you require old browser support.'\n )\n}\n\nfunction typedArraySupport () {\n // Can typed array instances can be augmented?\n try {\n const arr = new Uint8Array(1)\n const proto = { foo: function () { return 42 } }\n Object.setPrototypeOf(proto, Uint8Array.prototype)\n Object.setPrototypeOf(arr, proto)\n return arr.foo() === 42\n } catch (e) {\n return false\n }\n}\n\nObject.defineProperty(Buffer.prototype, 'parent', {\n enumerable: true,\n get: function () {\n if (!Buffer.isBuffer(this)) return undefined\n return this.buffer\n }\n})\n\nObject.defineProperty(Buffer.prototype, 'offset', {\n enumerable: true,\n get: function () {\n if (!Buffer.isBuffer(this)) return undefined\n return this.byteOffset\n }\n})\n\nfunction createBuffer (length) {\n if (length > K_MAX_LENGTH) {\n throw new RangeError('The value \"' + length + '\" is invalid for option \"size\"')\n }\n // Return an augmented `Uint8Array` instance\n const buf = new Uint8Array(length)\n Object.setPrototypeOf(buf, Buffer.prototype)\n return buf\n}\n\n/**\n * The Buffer constructor returns instances of `Uint8Array` that have their\n * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of\n * `Uint8Array`, so the returned instances will have all the node `Buffer` methods\n * and the `Uint8Array` methods. Square bracket notation works as expected -- it\n * returns a single octet.\n *\n * The `Uint8Array` prototype remains unmodified.\n */\n\nfunction Buffer (arg, encodingOrOffset, length) {\n // Common case.\n if (typeof arg === 'number') {\n if (typeof encodingOrOffset === 'string') {\n throw new TypeError(\n 'The \"string\" argument must be of type string. Received type number'\n )\n }\n return allocUnsafe(arg)\n }\n return from(arg, encodingOrOffset, length)\n}\n\nBuffer.poolSize = 8192 // not used by this implementation\n\nfunction from (value, encodingOrOffset, length) {\n if (typeof value === 'string') {\n return fromString(value, encodingOrOffset)\n }\n\n if (ArrayBuffer.isView(value)) {\n return fromArrayView(value)\n }\n\n if (value == null) {\n throw new TypeError(\n 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' +\n 'or Array-like Object. Received type ' + (typeof value)\n )\n }\n\n if (isInstance(value, ArrayBuffer) ||\n (value && isInstance(value.buffer, ArrayBuffer))) {\n return fromArrayBuffer(value, encodingOrOffset, length)\n }\n\n if (typeof SharedArrayBuffer !== 'undefined' &&\n (isInstance(value, SharedArrayBuffer) ||\n (value && isInstance(value.buffer, SharedArrayBuffer)))) {\n return fromArrayBuffer(value, encodingOrOffset, length)\n }\n\n if (typeof value === 'number') {\n throw new TypeError(\n 'The \"value\" argument must not be of type number. Received type number'\n )\n }\n\n const valueOf = value.valueOf && value.valueOf()\n if (valueOf != null && valueOf !== value) {\n return Buffer.from(valueOf, encodingOrOffset, length)\n }\n\n const b = fromObject(value)\n if (b) return b\n\n if (typeof Symbol !== 'undefined' && Symbol.toPrimitive != null &&\n typeof value[Symbol.toPrimitive] === 'function') {\n return Buffer.from(value[Symbol.toPrimitive]('string'), encodingOrOffset, length)\n }\n\n throw new TypeError(\n 'The first argument must be one of type string, Buffer, ArrayBuffer, Array, ' +\n 'or Array-like Object. 
Received type ' + (typeof value)\n )\n}\n\n/**\n * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError\n * if value is a number.\n * Buffer.from(str[, encoding])\n * Buffer.from(array)\n * Buffer.from(buffer)\n * Buffer.from(arrayBuffer[, byteOffset[, length]])\n **/\nBuffer.from = function (value, encodingOrOffset, length) {\n return from(value, encodingOrOffset, length)\n}\n\n// Note: Change prototype *after* Buffer.from is defined to workaround Chrome bug:\n// https://github.com/feross/buffer/pull/148\nObject.setPrototypeOf(Buffer.prototype, Uint8Array.prototype)\nObject.setPrototypeOf(Buffer, Uint8Array)\n\nfunction assertSize (size) {\n if (typeof size !== 'number') {\n throw new TypeError('\"size\" argument must be of type number')\n } else if (size < 0) {\n throw new RangeError('The value \"' + size + '\" is invalid for option \"size\"')\n }\n}\n\nfunction alloc (size, fill, encoding) {\n assertSize(size)\n if (size <= 0) {\n return createBuffer(size)\n }\n if (fill !== undefined) {\n // Only pay attention to encoding if it's a string. This\n // prevents accidentally sending in a number that would\n // be interpreted as a start offset.\n return typeof encoding === 'string'\n ? createBuffer(size).fill(fill, encoding)\n : createBuffer(size).fill(fill)\n }\n return createBuffer(size)\n}\n\n/**\n * Creates a new filled Buffer instance.\n * alloc(size[, fill[, encoding]])\n **/\nBuffer.alloc = function (size, fill, encoding) {\n return alloc(size, fill, encoding)\n}\n\nfunction allocUnsafe (size) {\n assertSize(size)\n return createBuffer(size < 0 ? 0 : checked(size) | 0)\n}\n\n/**\n * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.\n * */\nBuffer.allocUnsafe = function (size) {\n return allocUnsafe(size)\n}\n/**\n * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.\n */\nBuffer.allocUnsafeSlow = function (size) {\n return allocUnsafe(size)\n}\n\nfunction fromString (string, encoding) {\n if (typeof encoding !== 'string' || encoding === '') {\n encoding = 'utf8'\n }\n\n if (!Buffer.isEncoding(encoding)) {\n throw new TypeError('Unknown encoding: ' + encoding)\n }\n\n const length = byteLength(string, encoding) | 0\n let buf = createBuffer(length)\n\n const actual = buf.write(string, encoding)\n\n if (actual !== length) {\n // Writing a hex string, for example, that contains invalid characters will\n // cause everything after the first invalid character to be ignored. (e.g.\n // 'abxxcd' will be treated as 'ab')\n buf = buf.slice(0, actual)\n }\n\n return buf\n}\n\nfunction fromArrayLike (array) {\n const length = array.length < 0 ? 
0 : checked(array.length) | 0\n const buf = createBuffer(length)\n for (let i = 0; i < length; i += 1) {\n buf[i] = array[i] & 255\n }\n return buf\n}\n\nfunction fromArrayView (arrayView) {\n if (isInstance(arrayView, Uint8Array)) {\n const copy = new Uint8Array(arrayView)\n return fromArrayBuffer(copy.buffer, copy.byteOffset, copy.byteLength)\n }\n return fromArrayLike(arrayView)\n}\n\nfunction fromArrayBuffer (array, byteOffset, length) {\n if (byteOffset < 0 || array.byteLength < byteOffset) {\n throw new RangeError('\"offset\" is outside of buffer bounds')\n }\n\n if (array.byteLength < byteOffset + (length || 0)) {\n throw new RangeError('\"length\" is outside of buffer bounds')\n }\n\n let buf\n if (byteOffset === undefined && length === undefined) {\n buf = new Uint8Array(array)\n } else if (length === undefined) {\n buf = new Uint8Array(array, byteOffset)\n } else {\n buf = new Uint8Array(array, byteOffset, length)\n }\n\n // Return an augmented `Uint8Array` instance\n Object.setPrototypeOf(buf, Buffer.prototype)\n\n return buf\n}\n\nfunction fromObject (obj) {\n if (Buffer.isBuffer(obj)) {\n const len = checked(obj.length) | 0\n const buf = createBuffer(len)\n\n if (buf.length === 0) {\n return buf\n }\n\n obj.copy(buf, 0, 0, len)\n return buf\n }\n\n if (obj.length !== undefined) {\n if (typeof obj.length !== 'number' || numberIsNaN(obj.length)) {\n return createBuffer(0)\n }\n return fromArrayLike(obj)\n }\n\n if (obj.type === 'Buffer' && Array.isArray(obj.data)) {\n return fromArrayLike(obj.data)\n }\n}\n\nfunction checked (length) {\n // Note: cannot use `length < K_MAX_LENGTH` here because that fails when\n // length is NaN (which is otherwise coerced to zero.)\n if (length >= K_MAX_LENGTH) {\n throw new RangeError('Attempt to allocate Buffer larger than maximum ' +\n 'size: 0x' + K_MAX_LENGTH.toString(16) + ' bytes')\n }\n return length | 0\n}\n\nfunction SlowBuffer (length) {\n if (+length != length) { // eslint-disable-line eqeqeq\n length = 0\n }\n return Buffer.alloc(+length)\n}\n\nBuffer.isBuffer = function isBuffer (b) {\n return b != null && b._isBuffer === true &&\n b !== Buffer.prototype // so Buffer.isBuffer(Buffer.prototype) will be false\n}\n\nBuffer.compare = function compare (a, b) {\n if (isInstance(a, Uint8Array)) a = Buffer.from(a, a.offset, a.byteLength)\n if (isInstance(b, Uint8Array)) b = Buffer.from(b, b.offset, b.byteLength)\n if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {\n throw new TypeError(\n 'The \"buf1\", \"buf2\" arguments must be one of type Buffer or Uint8Array'\n )\n }\n\n if (a === b) return 0\n\n let x = a.length\n let y = b.length\n\n for (let i = 0, len = Math.min(x, y); i < len; ++i) {\n if (a[i] !== b[i]) {\n x = a[i]\n y = b[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\nBuffer.isEncoding = function isEncoding (encoding) {\n switch (String(encoding).toLowerCase()) {\n case 'hex':\n case 'utf8':\n case 'utf-8':\n case 'ascii':\n case 'latin1':\n case 'binary':\n case 'base64':\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return true\n default:\n return false\n }\n}\n\nBuffer.concat = function concat (list, length) {\n if (!Array.isArray(list)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n\n if (list.length === 0) {\n return Buffer.alloc(0)\n }\n\n let i\n if (length === undefined) {\n length = 0\n for (i = 0; i < list.length; ++i) {\n length += list[i].length\n }\n }\n\n const buffer = Buffer.allocUnsafe(length)\n let pos = 0\n for (i = 0; 
i < list.length; ++i) {\n let buf = list[i]\n if (isInstance(buf, Uint8Array)) {\n if (pos + buf.length > buffer.length) {\n if (!Buffer.isBuffer(buf)) buf = Buffer.from(buf)\n buf.copy(buffer, pos)\n } else {\n Uint8Array.prototype.set.call(\n buffer,\n buf,\n pos\n )\n }\n } else if (!Buffer.isBuffer(buf)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n } else {\n buf.copy(buffer, pos)\n }\n pos += buf.length\n }\n return buffer\n}\n\nfunction byteLength (string, encoding) {\n if (Buffer.isBuffer(string)) {\n return string.length\n }\n if (ArrayBuffer.isView(string) || isInstance(string, ArrayBuffer)) {\n return string.byteLength\n }\n if (typeof string !== 'string') {\n throw new TypeError(\n 'The \"string\" argument must be one of type string, Buffer, or ArrayBuffer. ' +\n 'Received type ' + typeof string\n )\n }\n\n const len = string.length\n const mustMatch = (arguments.length > 2 && arguments[2] === true)\n if (!mustMatch && len === 0) return 0\n\n // Use a for loop to avoid recursion\n let loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'ascii':\n case 'latin1':\n case 'binary':\n return len\n case 'utf8':\n case 'utf-8':\n return utf8ToBytes(string).length\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return len * 2\n case 'hex':\n return len >>> 1\n case 'base64':\n return base64ToBytes(string).length\n default:\n if (loweredCase) {\n return mustMatch ? -1 : utf8ToBytes(string).length // assume utf8\n }\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\nBuffer.byteLength = byteLength\n\nfunction slowToString (encoding, start, end) {\n let loweredCase = false\n\n // No need to verify that \"this.length <= MAX_UINT32\" since it's a read-only\n // property of a typed array.\n\n // This behaves neither like String nor Uint8Array in that we set start/end\n // to their upper/lower bounds if the value passed is out of range.\n // undefined is handled specially as per ECMA-262 6th Edition,\n // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.\n if (start === undefined || start < 0) {\n start = 0\n }\n // Return early if start > this.length. Done here to prevent potential uint32\n // coercion fail below.\n if (start > this.length) {\n return ''\n }\n\n if (end === undefined || end > this.length) {\n end = this.length\n }\n\n if (end <= 0) {\n return ''\n }\n\n // Force coercion to uint32. This will also coerce falsey/NaN values to 0.\n end >>>= 0\n start >>>= 0\n\n if (end <= start) {\n return ''\n }\n\n if (!encoding) encoding = 'utf8'\n\n while (true) {\n switch (encoding) {\n case 'hex':\n return hexSlice(this, start, end)\n\n case 'utf8':\n case 'utf-8':\n return utf8Slice(this, start, end)\n\n case 'ascii':\n return asciiSlice(this, start, end)\n\n case 'latin1':\n case 'binary':\n return latin1Slice(this, start, end)\n\n case 'base64':\n return base64Slice(this, start, end)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return utf16leSlice(this, start, end)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = (encoding + '').toLowerCase()\n loweredCase = true\n }\n }\n}\n\n// This property is used by `Buffer.isBuffer` (and the `is-buffer` npm package)\n// to detect a Buffer instance. It's not possible to use `instanceof Buffer`\n// reliably in a browserify context because there could be multiple different\n// copies of the 'buffer' package in use. 
This method works even for Buffer\n// instances that were created from another copy of the `buffer` package.\n// See: https://github.com/feross/buffer/issues/154\nBuffer.prototype._isBuffer = true\n\nfunction swap (b, n, m) {\n const i = b[n]\n b[n] = b[m]\n b[m] = i\n}\n\nBuffer.prototype.swap16 = function swap16 () {\n const len = this.length\n if (len % 2 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 16-bits')\n }\n for (let i = 0; i < len; i += 2) {\n swap(this, i, i + 1)\n }\n return this\n}\n\nBuffer.prototype.swap32 = function swap32 () {\n const len = this.length\n if (len % 4 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 32-bits')\n }\n for (let i = 0; i < len; i += 4) {\n swap(this, i, i + 3)\n swap(this, i + 1, i + 2)\n }\n return this\n}\n\nBuffer.prototype.swap64 = function swap64 () {\n const len = this.length\n if (len % 8 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 64-bits')\n }\n for (let i = 0; i < len; i += 8) {\n swap(this, i, i + 7)\n swap(this, i + 1, i + 6)\n swap(this, i + 2, i + 5)\n swap(this, i + 3, i + 4)\n }\n return this\n}\n\nBuffer.prototype.toString = function toString () {\n const length = this.length\n if (length === 0) return ''\n if (arguments.length === 0) return utf8Slice(this, 0, length)\n return slowToString.apply(this, arguments)\n}\n\nBuffer.prototype.toLocaleString = Buffer.prototype.toString\n\nBuffer.prototype.equals = function equals (b) {\n if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')\n if (this === b) return true\n return Buffer.compare(this, b) === 0\n}\n\nBuffer.prototype.inspect = function inspect () {\n let str = ''\n const max = exports.INSPECT_MAX_BYTES\n str = this.toString('hex', 0, max).replace(/(.{2})/g, '$1 ').trim()\n if (this.length > max) str += ' ... '\n return ''\n}\nif (customInspectSymbol) {\n Buffer.prototype[customInspectSymbol] = Buffer.prototype.inspect\n}\n\nBuffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {\n if (isInstance(target, Uint8Array)) {\n target = Buffer.from(target, target.offset, target.byteLength)\n }\n if (!Buffer.isBuffer(target)) {\n throw new TypeError(\n 'The \"target\" argument must be one of type Buffer or Uint8Array. ' +\n 'Received type ' + (typeof target)\n )\n }\n\n if (start === undefined) {\n start = 0\n }\n if (end === undefined) {\n end = target ? 
target.length : 0\n }\n if (thisStart === undefined) {\n thisStart = 0\n }\n if (thisEnd === undefined) {\n thisEnd = this.length\n }\n\n if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {\n throw new RangeError('out of range index')\n }\n\n if (thisStart >= thisEnd && start >= end) {\n return 0\n }\n if (thisStart >= thisEnd) {\n return -1\n }\n if (start >= end) {\n return 1\n }\n\n start >>>= 0\n end >>>= 0\n thisStart >>>= 0\n thisEnd >>>= 0\n\n if (this === target) return 0\n\n let x = thisEnd - thisStart\n let y = end - start\n const len = Math.min(x, y)\n\n const thisCopy = this.slice(thisStart, thisEnd)\n const targetCopy = target.slice(start, end)\n\n for (let i = 0; i < len; ++i) {\n if (thisCopy[i] !== targetCopy[i]) {\n x = thisCopy[i]\n y = targetCopy[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\n// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,\n// OR the last index of `val` in `buffer` at offset <= `byteOffset`.\n//\n// Arguments:\n// - buffer - a Buffer to search\n// - val - a string, Buffer, or number\n// - byteOffset - an index into `buffer`; will be clamped to an int32\n// - encoding - an optional encoding, relevant is val is a string\n// - dir - true for indexOf, false for lastIndexOf\nfunction bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {\n // Empty buffer means no match\n if (buffer.length === 0) return -1\n\n // Normalize byteOffset\n if (typeof byteOffset === 'string') {\n encoding = byteOffset\n byteOffset = 0\n } else if (byteOffset > 0x7fffffff) {\n byteOffset = 0x7fffffff\n } else if (byteOffset < -0x80000000) {\n byteOffset = -0x80000000\n }\n byteOffset = +byteOffset // Coerce to Number.\n if (numberIsNaN(byteOffset)) {\n // byteOffset: it it's undefined, null, NaN, \"foo\", etc, search whole buffer\n byteOffset = dir ? 
0 : (buffer.length - 1)\n }\n\n // Normalize byteOffset: negative offsets start from the end of the buffer\n if (byteOffset < 0) byteOffset = buffer.length + byteOffset\n if (byteOffset >= buffer.length) {\n if (dir) return -1\n else byteOffset = buffer.length - 1\n } else if (byteOffset < 0) {\n if (dir) byteOffset = 0\n else return -1\n }\n\n // Normalize val\n if (typeof val === 'string') {\n val = Buffer.from(val, encoding)\n }\n\n // Finally, search either indexOf (if dir is true) or lastIndexOf\n if (Buffer.isBuffer(val)) {\n // Special case: looking for empty string/buffer always fails\n if (val.length === 0) {\n return -1\n }\n return arrayIndexOf(buffer, val, byteOffset, encoding, dir)\n } else if (typeof val === 'number') {\n val = val & 0xFF // Search for a byte value [0-255]\n if (typeof Uint8Array.prototype.indexOf === 'function') {\n if (dir) {\n return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)\n } else {\n return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)\n }\n }\n return arrayIndexOf(buffer, [val], byteOffset, encoding, dir)\n }\n\n throw new TypeError('val must be string, number or Buffer')\n}\n\nfunction arrayIndexOf (arr, val, byteOffset, encoding, dir) {\n let indexSize = 1\n let arrLength = arr.length\n let valLength = val.length\n\n if (encoding !== undefined) {\n encoding = String(encoding).toLowerCase()\n if (encoding === 'ucs2' || encoding === 'ucs-2' ||\n encoding === 'utf16le' || encoding === 'utf-16le') {\n if (arr.length < 2 || val.length < 2) {\n return -1\n }\n indexSize = 2\n arrLength /= 2\n valLength /= 2\n byteOffset /= 2\n }\n }\n\n function read (buf, i) {\n if (indexSize === 1) {\n return buf[i]\n } else {\n return buf.readUInt16BE(i * indexSize)\n }\n }\n\n let i\n if (dir) {\n let foundIndex = -1\n for (i = byteOffset; i < arrLength; i++) {\n if (read(arr, i) === read(val, foundIndex === -1 ? 
0 : i - foundIndex)) {\n if (foundIndex === -1) foundIndex = i\n if (i - foundIndex + 1 === valLength) return foundIndex * indexSize\n } else {\n if (foundIndex !== -1) i -= i - foundIndex\n foundIndex = -1\n }\n }\n } else {\n if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength\n for (i = byteOffset; i >= 0; i--) {\n let found = true\n for (let j = 0; j < valLength; j++) {\n if (read(arr, i + j) !== read(val, j)) {\n found = false\n break\n }\n }\n if (found) return i\n }\n }\n\n return -1\n}\n\nBuffer.prototype.includes = function includes (val, byteOffset, encoding) {\n return this.indexOf(val, byteOffset, encoding) !== -1\n}\n\nBuffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, true)\n}\n\nBuffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, false)\n}\n\nfunction hexWrite (buf, string, offset, length) {\n offset = Number(offset) || 0\n const remaining = buf.length - offset\n if (!length) {\n length = remaining\n } else {\n length = Number(length)\n if (length > remaining) {\n length = remaining\n }\n }\n\n const strLen = string.length\n\n if (length > strLen / 2) {\n length = strLen / 2\n }\n let i\n for (i = 0; i < length; ++i) {\n const parsed = parseInt(string.substr(i * 2, 2), 16)\n if (numberIsNaN(parsed)) return i\n buf[offset + i] = parsed\n }\n return i\n}\n\nfunction utf8Write (buf, string, offset, length) {\n return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nfunction asciiWrite (buf, string, offset, length) {\n return blitBuffer(asciiToBytes(string), buf, offset, length)\n}\n\nfunction base64Write (buf, string, offset, length) {\n return blitBuffer(base64ToBytes(string), buf, offset, length)\n}\n\nfunction ucs2Write (buf, string, offset, length) {\n return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nBuffer.prototype.write = function write (string, offset, length, encoding) {\n // Buffer#write(string)\n if (offset === undefined) {\n encoding = 'utf8'\n length = this.length\n offset = 0\n // Buffer#write(string, encoding)\n } else if (length === undefined && typeof offset === 'string') {\n encoding = offset\n length = this.length\n offset = 0\n // Buffer#write(string, offset[, length][, encoding])\n } else if (isFinite(offset)) {\n offset = offset >>> 0\n if (isFinite(length)) {\n length = length >>> 0\n if (encoding === undefined) encoding = 'utf8'\n } else {\n encoding = length\n length = undefined\n }\n } else {\n throw new Error(\n 'Buffer.write(string, encoding, offset[, length]) is no longer supported'\n )\n }\n\n const remaining = this.length - offset\n if (length === undefined || length > remaining) length = remaining\n\n if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {\n throw new RangeError('Attempt to write outside buffer bounds')\n }\n\n if (!encoding) encoding = 'utf8'\n\n let loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'hex':\n return hexWrite(this, string, offset, length)\n\n case 'utf8':\n case 'utf-8':\n return utf8Write(this, string, offset, length)\n\n case 'ascii':\n case 'latin1':\n case 'binary':\n return asciiWrite(this, string, offset, length)\n\n case 'base64':\n // Warning: maxLength not taken into account in base64Write\n return base64Write(this, string, offset, length)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 
'utf-16le':\n return ucs2Write(this, string, offset, length)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\n\nBuffer.prototype.toJSON = function toJSON () {\n return {\n type: 'Buffer',\n data: Array.prototype.slice.call(this._arr || this, 0)\n }\n}\n\nfunction base64Slice (buf, start, end) {\n if (start === 0 && end === buf.length) {\n return base64.fromByteArray(buf)\n } else {\n return base64.fromByteArray(buf.slice(start, end))\n }\n}\n\nfunction utf8Slice (buf, start, end) {\n end = Math.min(buf.length, end)\n const res = []\n\n let i = start\n while (i < end) {\n const firstByte = buf[i]\n let codePoint = null\n let bytesPerSequence = (firstByte > 0xEF)\n ? 4\n : (firstByte > 0xDF)\n ? 3\n : (firstByte > 0xBF)\n ? 2\n : 1\n\n if (i + bytesPerSequence <= end) {\n let secondByte, thirdByte, fourthByte, tempCodePoint\n\n switch (bytesPerSequence) {\n case 1:\n if (firstByte < 0x80) {\n codePoint = firstByte\n }\n break\n case 2:\n secondByte = buf[i + 1]\n if ((secondByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)\n if (tempCodePoint > 0x7F) {\n codePoint = tempCodePoint\n }\n }\n break\n case 3:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)\n if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {\n codePoint = tempCodePoint\n }\n }\n break\n case 4:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n fourthByte = buf[i + 3]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)\n if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {\n codePoint = tempCodePoint\n }\n }\n }\n }\n\n if (codePoint === null) {\n // we did not generate a valid codePoint so insert a\n // replacement char (U+FFFD) and advance only 1 byte\n codePoint = 0xFFFD\n bytesPerSequence = 1\n } else if (codePoint > 0xFFFF) {\n // encode to utf16 (surrogate pair dance)\n codePoint -= 0x10000\n res.push(codePoint >>> 10 & 0x3FF | 0xD800)\n codePoint = 0xDC00 | codePoint & 0x3FF\n }\n\n res.push(codePoint)\n i += bytesPerSequence\n }\n\n return decodeCodePointsArray(res)\n}\n\n// Based on http://stackoverflow.com/a/22747272/680742, the browser with\n// the lowest limit is Chrome, with 0x10000 args.\n// We go 1 magnitude less, for safety\nconst MAX_ARGUMENTS_LENGTH = 0x1000\n\nfunction decodeCodePointsArray (codePoints) {\n const len = codePoints.length\n if (len <= MAX_ARGUMENTS_LENGTH) {\n return String.fromCharCode.apply(String, codePoints) // avoid extra slice()\n }\n\n // Decode in chunks to avoid \"call stack size exceeded\".\n let res = ''\n let i = 0\n while (i < len) {\n res += String.fromCharCode.apply(\n String,\n codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)\n )\n }\n return res\n}\n\nfunction asciiSlice (buf, start, end) {\n let ret = ''\n end = Math.min(buf.length, end)\n\n for (let i = start; i < end; ++i) {\n ret += String.fromCharCode(buf[i] & 0x7F)\n }\n return ret\n}\n\nfunction latin1Slice (buf, start, end) {\n let ret = ''\n end = Math.min(buf.length, end)\n\n for (let i = start; i < end; ++i) {\n ret += String.fromCharCode(buf[i])\n }\n return ret\n}\n\nfunction hexSlice (buf, start, 
end) {\n const len = buf.length\n\n if (!start || start < 0) start = 0\n if (!end || end < 0 || end > len) end = len\n\n let out = ''\n for (let i = start; i < end; ++i) {\n out += hexSliceLookupTable[buf[i]]\n }\n return out\n}\n\nfunction utf16leSlice (buf, start, end) {\n const bytes = buf.slice(start, end)\n let res = ''\n // If bytes.length is odd, the last 8 bits must be ignored (same as node.js)\n for (let i = 0; i < bytes.length - 1; i += 2) {\n res += String.fromCharCode(bytes[i] + (bytes[i + 1] * 256))\n }\n return res\n}\n\nBuffer.prototype.slice = function slice (start, end) {\n const len = this.length\n start = ~~start\n end = end === undefined ? len : ~~end\n\n if (start < 0) {\n start += len\n if (start < 0) start = 0\n } else if (start > len) {\n start = len\n }\n\n if (end < 0) {\n end += len\n if (end < 0) end = 0\n } else if (end > len) {\n end = len\n }\n\n if (end < start) end = start\n\n const newBuf = this.subarray(start, end)\n // Return an augmented `Uint8Array` instance\n Object.setPrototypeOf(newBuf, Buffer.prototype)\n\n return newBuf\n}\n\n/*\n * Need to make sure that buffer isn't trying to write out of bounds.\n */\nfunction checkOffset (offset, ext, length) {\n if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')\n if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')\n}\n\nBuffer.prototype.readUintLE =\nBuffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {\n offset = offset >>> 0\n byteLength = byteLength >>> 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n let val = this[offset]\n let mul = 1\n let i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUintBE =\nBuffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {\n offset = offset >>> 0\n byteLength = byteLength >>> 0\n if (!noAssert) {\n checkOffset(offset, byteLength, this.length)\n }\n\n let val = this[offset + --byteLength]\n let mul = 1\n while (byteLength > 0 && (mul *= 0x100)) {\n val += this[offset + --byteLength] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUint8 =\nBuffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 1, this.length)\n return this[offset]\n}\n\nBuffer.prototype.readUint16LE =\nBuffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 2, this.length)\n return this[offset] | (this[offset + 1] << 8)\n}\n\nBuffer.prototype.readUint16BE =\nBuffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 2, this.length)\n return (this[offset] << 8) | this[offset + 1]\n}\n\nBuffer.prototype.readUint32LE =\nBuffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return ((this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16)) +\n (this[offset + 3] * 0x1000000)\n}\n\nBuffer.prototype.readUint32BE =\nBuffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] * 0x1000000) +\n ((this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n this[offset + 3])\n}\n\nBuffer.prototype.readBigUInt64LE = defineBigIntMethod(function 
readBigUInt64LE (offset) {\n offset = offset >>> 0\n validateNumber(offset, 'offset')\n const first = this[offset]\n const last = this[offset + 7]\n if (first === undefined || last === undefined) {\n boundsError(offset, this.length - 8)\n }\n\n const lo = first +\n this[++offset] * 2 ** 8 +\n this[++offset] * 2 ** 16 +\n this[++offset] * 2 ** 24\n\n const hi = this[++offset] +\n this[++offset] * 2 ** 8 +\n this[++offset] * 2 ** 16 +\n last * 2 ** 24\n\n return BigInt(lo) + (BigInt(hi) << BigInt(32))\n})\n\nBuffer.prototype.readBigUInt64BE = defineBigIntMethod(function readBigUInt64BE (offset) {\n offset = offset >>> 0\n validateNumber(offset, 'offset')\n const first = this[offset]\n const last = this[offset + 7]\n if (first === undefined || last === undefined) {\n boundsError(offset, this.length - 8)\n }\n\n const hi = first * 2 ** 24 +\n this[++offset] * 2 ** 16 +\n this[++offset] * 2 ** 8 +\n this[++offset]\n\n const lo = this[++offset] * 2 ** 24 +\n this[++offset] * 2 ** 16 +\n this[++offset] * 2 ** 8 +\n last\n\n return (BigInt(hi) << BigInt(32)) + BigInt(lo)\n})\n\nBuffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {\n offset = offset >>> 0\n byteLength = byteLength >>> 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n let val = this[offset]\n let mul = 1\n let i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {\n offset = offset >>> 0\n byteLength = byteLength >>> 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n let i = byteLength\n let mul = 1\n let val = this[offset + --i]\n while (i > 0 && (mul *= 0x100)) {\n val += this[offset + --i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readInt8 = function readInt8 (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 1, this.length)\n if (!(this[offset] & 0x80)) return (this[offset])\n return ((0xff - this[offset] + 1) * -1)\n}\n\nBuffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 2, this.length)\n const val = this[offset] | (this[offset + 1] << 8)\n return (val & 0x8000) ? val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 2, this.length)\n const val = this[offset + 1] | (this[offset] << 8)\n return (val & 0x8000) ? 
val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16) |\n (this[offset + 3] << 24)\n}\n\nBuffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] << 24) |\n (this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n (this[offset + 3])\n}\n\nBuffer.prototype.readBigInt64LE = defineBigIntMethod(function readBigInt64LE (offset) {\n offset = offset >>> 0\n validateNumber(offset, 'offset')\n const first = this[offset]\n const last = this[offset + 7]\n if (first === undefined || last === undefined) {\n boundsError(offset, this.length - 8)\n }\n\n const val = this[offset + 4] +\n this[offset + 5] * 2 ** 8 +\n this[offset + 6] * 2 ** 16 +\n (last << 24) // Overflow\n\n return (BigInt(val) << BigInt(32)) +\n BigInt(first +\n this[++offset] * 2 ** 8 +\n this[++offset] * 2 ** 16 +\n this[++offset] * 2 ** 24)\n})\n\nBuffer.prototype.readBigInt64BE = defineBigIntMethod(function readBigInt64BE (offset) {\n offset = offset >>> 0\n validateNumber(offset, 'offset')\n const first = this[offset]\n const last = this[offset + 7]\n if (first === undefined || last === undefined) {\n boundsError(offset, this.length - 8)\n }\n\n const val = (first << 24) + // Overflow\n this[++offset] * 2 ** 16 +\n this[++offset] * 2 ** 8 +\n this[++offset]\n\n return (BigInt(val) << BigInt(32)) +\n BigInt(this[++offset] * 2 ** 24 +\n this[++offset] * 2 ** 16 +\n this[++offset] * 2 ** 8 +\n last)\n})\n\nBuffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, true, 23, 4)\n}\n\nBuffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, false, 23, 4)\n}\n\nBuffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, true, 52, 8)\n}\n\nBuffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {\n offset = offset >>> 0\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, false, 52, 8)\n}\n\nfunction checkInt (buf, value, offset, ext, max, min) {\n if (!Buffer.isBuffer(buf)) throw new TypeError('\"buffer\" argument must be a Buffer instance')\n if (value > max || value < min) throw new RangeError('\"value\" argument is out of bounds')\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n}\n\nBuffer.prototype.writeUintLE =\nBuffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset >>> 0\n byteLength = byteLength >>> 0\n if (!noAssert) {\n const maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n let mul = 1\n let i = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUintBE =\nBuffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset >>> 0\n 
byteLength = byteLength >>> 0\n if (!noAssert) {\n const maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n let i = byteLength - 1\n let mul = 1\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUint8 =\nBuffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nBuffer.prototype.writeUint16LE =\nBuffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n return offset + 2\n}\n\nBuffer.prototype.writeUint16BE =\nBuffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n return offset + 2\n}\n\nBuffer.prototype.writeUint32LE =\nBuffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n this[offset + 3] = (value >>> 24)\n this[offset + 2] = (value >>> 16)\n this[offset + 1] = (value >>> 8)\n this[offset] = (value & 0xff)\n return offset + 4\n}\n\nBuffer.prototype.writeUint32BE =\nBuffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n return offset + 4\n}\n\nfunction wrtBigUInt64LE (buf, value, offset, min, max) {\n checkIntBI(value, min, max, buf, offset, 7)\n\n let lo = Number(value & BigInt(0xffffffff))\n buf[offset++] = lo\n lo = lo >> 8\n buf[offset++] = lo\n lo = lo >> 8\n buf[offset++] = lo\n lo = lo >> 8\n buf[offset++] = lo\n let hi = Number(value >> BigInt(32) & BigInt(0xffffffff))\n buf[offset++] = hi\n hi = hi >> 8\n buf[offset++] = hi\n hi = hi >> 8\n buf[offset++] = hi\n hi = hi >> 8\n buf[offset++] = hi\n return offset\n}\n\nfunction wrtBigUInt64BE (buf, value, offset, min, max) {\n checkIntBI(value, min, max, buf, offset, 7)\n\n let lo = Number(value & BigInt(0xffffffff))\n buf[offset + 7] = lo\n lo = lo >> 8\n buf[offset + 6] = lo\n lo = lo >> 8\n buf[offset + 5] = lo\n lo = lo >> 8\n buf[offset + 4] = lo\n let hi = Number(value >> BigInt(32) & BigInt(0xffffffff))\n buf[offset + 3] = hi\n hi = hi >> 8\n buf[offset + 2] = hi\n hi = hi >> 8\n buf[offset + 1] = hi\n hi = hi >> 8\n buf[offset] = hi\n return offset + 8\n}\n\nBuffer.prototype.writeBigUInt64LE = defineBigIntMethod(function writeBigUInt64LE (value, offset = 0) {\n return wrtBigUInt64LE(this, value, offset, BigInt(0), BigInt('0xffffffffffffffff'))\n})\n\nBuffer.prototype.writeBigUInt64BE = defineBigIntMethod(function writeBigUInt64BE (value, offset = 0) {\n return wrtBigUInt64BE(this, value, offset, BigInt(0), BigInt('0xffffffffffffffff'))\n})\n\nBuffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset >>> 0\n if 
(!noAssert) {\n const limit = Math.pow(2, (8 * byteLength) - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n let i = 0\n let mul = 1\n let sub = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) {\n const limit = Math.pow(2, (8 * byteLength) - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n let i = byteLength - 1\n let mul = 1\n let sub = 0\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)\n if (value < 0) value = 0xff + value + 1\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nBuffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n return offset + 2\n}\n\nBuffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n return offset + 2\n}\n\nBuffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n this[offset + 2] = (value >>> 16)\n this[offset + 3] = (value >>> 24)\n return offset + 4\n}\n\nBuffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (value < 0) value = 0xffffffff + value + 1\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n return offset + 4\n}\n\nBuffer.prototype.writeBigInt64LE = defineBigIntMethod(function writeBigInt64LE (value, offset = 0) {\n return wrtBigUInt64LE(this, value, offset, -BigInt('0x8000000000000000'), BigInt('0x7fffffffffffffff'))\n})\n\nBuffer.prototype.writeBigInt64BE = defineBigIntMethod(function writeBigInt64BE (value, offset = 0) {\n return wrtBigUInt64BE(this, value, offset, -BigInt('0x8000000000000000'), BigInt('0x7fffffffffffffff'))\n})\n\nfunction checkIEEE754 (buf, value, offset, ext, max, min) {\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n if (offset < 0) throw new RangeError('Index out of range')\n}\n\nfunction writeFloat (buf, value, offset, littleEndian, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)\n }\n ieee754.write(buf, value, offset, littleEndian, 23, 4)\n return offset + 
4\n}\n\nBuffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {\n return writeFloat(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {\n return writeFloat(this, value, offset, false, noAssert)\n}\n\nfunction writeDouble (buf, value, offset, littleEndian, noAssert) {\n value = +value\n offset = offset >>> 0\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)\n }\n ieee754.write(buf, value, offset, littleEndian, 52, 8)\n return offset + 8\n}\n\nBuffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {\n return writeDouble(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {\n return writeDouble(this, value, offset, false, noAssert)\n}\n\n// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)\nBuffer.prototype.copy = function copy (target, targetStart, start, end) {\n if (!Buffer.isBuffer(target)) throw new TypeError('argument should be a Buffer')\n if (!start) start = 0\n if (!end && end !== 0) end = this.length\n if (targetStart >= target.length) targetStart = target.length\n if (!targetStart) targetStart = 0\n if (end > 0 && end < start) end = start\n\n // Copy 0 bytes; we're done\n if (end === start) return 0\n if (target.length === 0 || this.length === 0) return 0\n\n // Fatal error conditions\n if (targetStart < 0) {\n throw new RangeError('targetStart out of bounds')\n }\n if (start < 0 || start >= this.length) throw new RangeError('Index out of range')\n if (end < 0) throw new RangeError('sourceEnd out of bounds')\n\n // Are we oob?\n if (end > this.length) end = this.length\n if (target.length - targetStart < end - start) {\n end = target.length - targetStart + start\n }\n\n const len = end - start\n\n if (this === target && typeof Uint8Array.prototype.copyWithin === 'function') {\n // Use built-in when available, missing from IE11\n this.copyWithin(targetStart, start, end)\n } else {\n Uint8Array.prototype.set.call(\n target,\n this.subarray(start, end),\n targetStart\n )\n }\n\n return len\n}\n\n// Usage:\n// buffer.fill(number[, offset[, end]])\n// buffer.fill(buffer[, offset[, end]])\n// buffer.fill(string[, offset[, end]][, encoding])\nBuffer.prototype.fill = function fill (val, start, end, encoding) {\n // Handle string cases:\n if (typeof val === 'string') {\n if (typeof start === 'string') {\n encoding = start\n start = 0\n end = this.length\n } else if (typeof end === 'string') {\n encoding = end\n end = this.length\n }\n if (encoding !== undefined && typeof encoding !== 'string') {\n throw new TypeError('encoding must be a string')\n }\n if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {\n throw new TypeError('Unknown encoding: ' + encoding)\n }\n if (val.length === 1) {\n const code = val.charCodeAt(0)\n if ((encoding === 'utf8' && code < 128) ||\n encoding === 'latin1') {\n // Fast path: If `val` fits into a single byte, use that numeric value.\n val = code\n }\n }\n } else if (typeof val === 'number') {\n val = val & 255\n } else if (typeof val === 'boolean') {\n val = Number(val)\n }\n\n // Invalid ranges are not set to a default, so can range check early.\n if (start < 0 || this.length < start || this.length < end) {\n throw new RangeError('Out of range index')\n }\n\n if (end <= start) {\n return this\n }\n\n start = start >>> 0\n end = end === undefined ? 
this.length : end >>> 0\n\n if (!val) val = 0\n\n let i\n if (typeof val === 'number') {\n for (i = start; i < end; ++i) {\n this[i] = val\n }\n } else {\n const bytes = Buffer.isBuffer(val)\n ? val\n : Buffer.from(val, encoding)\n const len = bytes.length\n if (len === 0) {\n throw new TypeError('The value \"' + val +\n '\" is invalid for argument \"value\"')\n }\n for (i = 0; i < end - start; ++i) {\n this[i + start] = bytes[i % len]\n }\n }\n\n return this\n}\n\n// CUSTOM ERRORS\n// =============\n\n// Simplified versions from Node, changed for Buffer-only usage\nconst errors = {}\nfunction E (sym, getMessage, Base) {\n errors[sym] = class NodeError extends Base {\n constructor () {\n super()\n\n Object.defineProperty(this, 'message', {\n value: getMessage.apply(this, arguments),\n writable: true,\n configurable: true\n })\n\n // Add the error code to the name to include it in the stack trace.\n this.name = `${this.name} [${sym}]`\n // Access the stack to generate the error message including the error code\n // from the name.\n this.stack // eslint-disable-line no-unused-expressions\n // Reset the name to the actual name.\n delete this.name\n }\n\n get code () {\n return sym\n }\n\n set code (value) {\n Object.defineProperty(this, 'code', {\n configurable: true,\n enumerable: true,\n value,\n writable: true\n })\n }\n\n toString () {\n return `${this.name} [${sym}]: ${this.message}`\n }\n }\n}\n\nE('ERR_BUFFER_OUT_OF_BOUNDS',\n function (name) {\n if (name) {\n return `${name} is outside of buffer bounds`\n }\n\n return 'Attempt to access memory outside buffer bounds'\n }, RangeError)\nE('ERR_INVALID_ARG_TYPE',\n function (name, actual) {\n return `The \"${name}\" argument must be of type number. Received type ${typeof actual}`\n }, TypeError)\nE('ERR_OUT_OF_RANGE',\n function (str, range, input) {\n let msg = `The value of \"${str}\" is out of range.`\n let received = input\n if (Number.isInteger(input) && Math.abs(input) > 2 ** 32) {\n received = addNumericalSeparator(String(input))\n } else if (typeof input === 'bigint') {\n received = String(input)\n if (input > BigInt(2) ** BigInt(32) || input < -(BigInt(2) ** BigInt(32))) {\n received = addNumericalSeparator(received)\n }\n received += 'n'\n }\n msg += ` It must be ${range}. Received ${received}`\n return msg\n }, RangeError)\n\nfunction addNumericalSeparator (val) {\n let res = ''\n let i = val.length\n const start = val[0] === '-' ? 1 : 0\n for (; i >= start + 4; i -= 3) {\n res = `_${val.slice(i - 3, i)}${res}`\n }\n return `${val.slice(0, i)}${res}`\n}\n\n// CHECK FUNCTIONS\n// ===============\n\nfunction checkBounds (buf, offset, byteLength) {\n validateNumber(offset, 'offset')\n if (buf[offset] === undefined || buf[offset + byteLength] === undefined) {\n boundsError(offset, buf.length - (byteLength + 1))\n }\n}\n\nfunction checkIntBI (value, min, max, buf, offset, byteLength) {\n if (value > max || value < min) {\n const n = typeof min === 'bigint' ? 
'n' : ''\n let range\n if (byteLength > 3) {\n if (min === 0 || min === BigInt(0)) {\n range = `>= 0${n} and < 2${n} ** ${(byteLength + 1) * 8}${n}`\n } else {\n range = `>= -(2${n} ** ${(byteLength + 1) * 8 - 1}${n}) and < 2 ** ` +\n `${(byteLength + 1) * 8 - 1}${n}`\n }\n } else {\n range = `>= ${min}${n} and <= ${max}${n}`\n }\n throw new errors.ERR_OUT_OF_RANGE('value', range, value)\n }\n checkBounds(buf, offset, byteLength)\n}\n\nfunction validateNumber (value, name) {\n if (typeof value !== 'number') {\n throw new errors.ERR_INVALID_ARG_TYPE(name, 'number', value)\n }\n}\n\nfunction boundsError (value, length, type) {\n if (Math.floor(value) !== value) {\n validateNumber(value, type)\n throw new errors.ERR_OUT_OF_RANGE(type || 'offset', 'an integer', value)\n }\n\n if (length < 0) {\n throw new errors.ERR_BUFFER_OUT_OF_BOUNDS()\n }\n\n throw new errors.ERR_OUT_OF_RANGE(type || 'offset',\n `>= ${type ? 1 : 0} and <= ${length}`,\n value)\n}\n\n// HELPER FUNCTIONS\n// ================\n\nconst INVALID_BASE64_RE = /[^+/0-9A-Za-z-_]/g\n\nfunction base64clean (str) {\n // Node takes equal signs as end of the Base64 encoding\n str = str.split('=')[0]\n // Node strips out invalid characters like \\n and \\t from the string, base64-js does not\n str = str.trim().replace(INVALID_BASE64_RE, '')\n // Node converts strings with length < 2 to ''\n if (str.length < 2) return ''\n // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not\n while (str.length % 4 !== 0) {\n str = str + '='\n }\n return str\n}\n\nfunction utf8ToBytes (string, units) {\n units = units || Infinity\n let codePoint\n const length = string.length\n let leadSurrogate = null\n const bytes = []\n\n for (let i = 0; i < length; ++i) {\n codePoint = string.charCodeAt(i)\n\n // is surrogate component\n if (codePoint > 0xD7FF && codePoint < 0xE000) {\n // last char was a lead\n if (!leadSurrogate) {\n // no lead yet\n if (codePoint > 0xDBFF) {\n // unexpected trail\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n } else if (i + 1 === length) {\n // unpaired lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n }\n\n // valid lead\n leadSurrogate = codePoint\n\n continue\n }\n\n // 2 leads in a row\n if (codePoint < 0xDC00) {\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n leadSurrogate = codePoint\n continue\n }\n\n // valid surrogate pair\n codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000\n } else if (leadSurrogate) {\n // valid bmp char, but last char was a lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n }\n\n leadSurrogate = null\n\n // encode utf8\n if (codePoint < 0x80) {\n if ((units -= 1) < 0) break\n bytes.push(codePoint)\n } else if (codePoint < 0x800) {\n if ((units -= 2) < 0) break\n bytes.push(\n codePoint >> 0x6 | 0xC0,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x10000) {\n if ((units -= 3) < 0) break\n bytes.push(\n codePoint >> 0xC | 0xE0,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x110000) {\n if ((units -= 4) < 0) break\n bytes.push(\n codePoint >> 0x12 | 0xF0,\n codePoint >> 0xC & 0x3F | 0x80,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else {\n throw new Error('Invalid code point')\n }\n }\n\n return bytes\n}\n\nfunction asciiToBytes (str) {\n const byteArray = []\n for (let i = 0; i < str.length; ++i) {\n // Node's code seems to be doing this and not & 0x7F..\n byteArray.push(str.charCodeAt(i) & 0xFF)\n }\n return 
byteArray\n}\n\nfunction utf16leToBytes (str, units) {\n let c, hi, lo\n const byteArray = []\n for (let i = 0; i < str.length; ++i) {\n if ((units -= 2) < 0) break\n\n c = str.charCodeAt(i)\n hi = c >> 8\n lo = c % 256\n byteArray.push(lo)\n byteArray.push(hi)\n }\n\n return byteArray\n}\n\nfunction base64ToBytes (str) {\n return base64.toByteArray(base64clean(str))\n}\n\nfunction blitBuffer (src, dst, offset, length) {\n let i\n for (i = 0; i < length; ++i) {\n if ((i + offset >= dst.length) || (i >= src.length)) break\n dst[i + offset] = src[i]\n }\n return i\n}\n\n// ArrayBuffer or Uint8Array objects from other contexts (i.e. iframes) do not pass\n// the `instanceof` check but they should be treated as of that type.\n// See: https://github.com/feross/buffer/issues/166\nfunction isInstance (obj, type) {\n return obj instanceof type ||\n (obj != null && obj.constructor != null && obj.constructor.name != null &&\n obj.constructor.name === type.name)\n}\nfunction numberIsNaN (obj) {\n // For IE11 support\n return obj !== obj // eslint-disable-line no-self-compare\n}\n\n// Create lookup table for `toString('hex')`\n// See: https://github.com/feross/buffer/issues/219\nconst hexSliceLookupTable = (function () {\n const alphabet = '0123456789abcdef'\n const table = new Array(256)\n for (let i = 0; i < 16; ++i) {\n const i16 = i * 16\n for (let j = 0; j < 16; ++j) {\n table[i16 + j] = alphabet[i] + alphabet[j]\n }\n }\n return table\n})()\n\n// Return not function with Error if BigInt not supported\nfunction defineBigIntMethod (fn) {\n return typeof BigInt === 'undefined' ? BufferBigIntNotDefined : fn\n}\n\nfunction BufferBigIntNotDefined () {\n throw new Error('BigInt not supported')\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/buffer/index.js?"); + +/***/ }), + +/***/ "./node_modules/entities/lib/decode.js": +/*!*********************************************!*\ + !*** ./node_modules/entities/lib/decode.js ***! + \*********************************************/ +/***/ (function(__unused_webpack_module, exports, __webpack_require__) { + +"use strict"; +eval("\nvar __importDefault = (this && this.__importDefault) || function (mod) {\n return (mod && mod.__esModule) ? mod : { \"default\": mod };\n};\nObject.defineProperty(exports, \"__esModule\", ({ value: true }));\nexports.decodeHTML = exports.decodeHTMLStrict = exports.decodeXML = void 0;\nvar entities_json_1 = __importDefault(__webpack_require__(/*! ./maps/entities.json */ \"./node_modules/entities/lib/maps/entities.json\"));\nvar legacy_json_1 = __importDefault(__webpack_require__(/*! ./maps/legacy.json */ \"./node_modules/entities/lib/maps/legacy.json\"));\nvar xml_json_1 = __importDefault(__webpack_require__(/*! ./maps/xml.json */ \"./node_modules/entities/lib/maps/xml.json\"));\nvar decode_codepoint_1 = __importDefault(__webpack_require__(/*! ./decode_codepoint */ \"./node_modules/entities/lib/decode_codepoint.js\"));\nvar strictEntityRe = /&(?:[a-zA-Z0-9]+|#[xX][\\da-fA-F]+|#\\d+);/g;\nexports.decodeXML = getStrictDecoder(xml_json_1.default);\nexports.decodeHTMLStrict = getStrictDecoder(entities_json_1.default);\nfunction getStrictDecoder(map) {\n var replace = getReplacer(map);\n return function (str) { return String(str).replace(strictEntityRe, replace); };\n}\nvar sorter = function (a, b) { return (a < b ? 
1 : -1); };\nexports.decodeHTML = (function () {\n var legacy = Object.keys(legacy_json_1.default).sort(sorter);\n var keys = Object.keys(entities_json_1.default).sort(sorter);\n for (var i = 0, j = 0; i < keys.length; i++) {\n if (legacy[j] === keys[i]) {\n keys[i] += \";?\";\n j++;\n }\n else {\n keys[i] += \";\";\n }\n }\n var re = new RegExp(\"&(?:\" + keys.join(\"|\") + \"|#[xX][\\\\da-fA-F]+;?|#\\\\d+;?)\", \"g\");\n var replace = getReplacer(entities_json_1.default);\n function replacer(str) {\n if (str.substr(-1) !== \";\")\n str += \";\";\n return replace(str);\n }\n // TODO consider creating a merged map\n return function (str) { return String(str).replace(re, replacer); };\n})();\nfunction getReplacer(map) {\n return function replace(str) {\n if (str.charAt(1) === \"#\") {\n var secondChar = str.charAt(2);\n if (secondChar === \"X\" || secondChar === \"x\") {\n return decode_codepoint_1.default(parseInt(str.substr(3), 16));\n }\n return decode_codepoint_1.default(parseInt(str.substr(2), 10));\n }\n // eslint-disable-next-line @typescript-eslint/prefer-nullish-coalescing\n return map[str.slice(1, -1)] || str;\n };\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/entities/lib/decode.js?"); + +/***/ }), + +/***/ "./node_modules/entities/lib/decode_codepoint.js": +/*!*******************************************************!*\ + !*** ./node_modules/entities/lib/decode_codepoint.js ***! + \*******************************************************/ +/***/ (function(__unused_webpack_module, exports, __webpack_require__) { + +"use strict"; +eval("\nvar __importDefault = (this && this.__importDefault) || function (mod) {\n return (mod && mod.__esModule) ? mod : { \"default\": mod };\n};\nObject.defineProperty(exports, \"__esModule\", ({ value: true }));\nvar decode_json_1 = __importDefault(__webpack_require__(/*! ./maps/decode.json */ \"./node_modules/entities/lib/maps/decode.json\"));\n// Adapted from https://github.com/mathiasbynens/he/blob/master/src/he.js#L94-L119\nvar fromCodePoint = \n// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition\nString.fromCodePoint ||\n function (codePoint) {\n var output = \"\";\n if (codePoint > 0xffff) {\n codePoint -= 0x10000;\n output += String.fromCharCode(((codePoint >>> 10) & 0x3ff) | 0xd800);\n codePoint = 0xdc00 | (codePoint & 0x3ff);\n }\n output += String.fromCharCode(codePoint);\n return output;\n };\nfunction decodeCodePoint(codePoint) {\n if ((codePoint >= 0xd800 && codePoint <= 0xdfff) || codePoint > 0x10ffff) {\n return \"\\uFFFD\";\n }\n if (codePoint in decode_json_1.default) {\n codePoint = decode_json_1.default[codePoint];\n }\n return fromCodePoint(codePoint);\n}\nexports[\"default\"] = decodeCodePoint;\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/entities/lib/decode_codepoint.js?"); + +/***/ }), + +/***/ "./node_modules/entities/lib/encode.js": +/*!*********************************************!*\ + !*** ./node_modules/entities/lib/encode.js ***! + \*********************************************/ +/***/ (function(__unused_webpack_module, exports, __webpack_require__) { + +"use strict"; +eval("\nvar __importDefault = (this && this.__importDefault) || function (mod) {\n return (mod && mod.__esModule) ? mod : { \"default\": mod };\n};\nObject.defineProperty(exports, \"__esModule\", ({ value: true }));\nexports.escapeUTF8 = exports.escape = exports.encodeNonAsciiHTML = exports.encodeHTML = exports.encodeXML = void 0;\nvar xml_json_1 = __importDefault(__webpack_require__(/*! 
./maps/xml.json */ \"./node_modules/entities/lib/maps/xml.json\"));\nvar inverseXML = getInverseObj(xml_json_1.default);\nvar xmlReplacer = getInverseReplacer(inverseXML);\n/**\n * Encodes all non-ASCII characters, as well as characters not valid in XML\n * documents using XML entities.\n *\n * If a character has no equivalent entity, a\n * numeric hexadecimal reference (eg. `ü`) will be used.\n */\nexports.encodeXML = getASCIIEncoder(inverseXML);\nvar entities_json_1 = __importDefault(__webpack_require__(/*! ./maps/entities.json */ \"./node_modules/entities/lib/maps/entities.json\"));\nvar inverseHTML = getInverseObj(entities_json_1.default);\nvar htmlReplacer = getInverseReplacer(inverseHTML);\n/**\n * Encodes all entities and non-ASCII characters in the input.\n *\n * This includes characters that are valid ASCII characters in HTML documents.\n * For example `#` will be encoded as `#`. To get a more compact output,\n * consider using the `encodeNonAsciiHTML` function.\n *\n * If a character has no equivalent entity, a\n * numeric hexadecimal reference (eg. `ü`) will be used.\n */\nexports.encodeHTML = getInverse(inverseHTML, htmlReplacer);\n/**\n * Encodes all non-ASCII characters, as well as characters not valid in HTML\n * documents using HTML entities.\n *\n * If a character has no equivalent entity, a\n * numeric hexadecimal reference (eg. `ü`) will be used.\n */\nexports.encodeNonAsciiHTML = getASCIIEncoder(inverseHTML);\nfunction getInverseObj(obj) {\n return Object.keys(obj)\n .sort()\n .reduce(function (inverse, name) {\n inverse[obj[name]] = \"&\" + name + \";\";\n return inverse;\n }, {});\n}\nfunction getInverseReplacer(inverse) {\n var single = [];\n var multiple = [];\n for (var _i = 0, _a = Object.keys(inverse); _i < _a.length; _i++) {\n var k = _a[_i];\n if (k.length === 1) {\n // Add value to single array\n single.push(\"\\\\\" + k);\n }\n else {\n // Add value to multiple array\n multiple.push(k);\n }\n }\n // Add ranges to single characters.\n single.sort();\n for (var start = 0; start < single.length - 1; start++) {\n // Find the end of a run of characters\n var end = start;\n while (end < single.length - 1 &&\n single[end].charCodeAt(1) + 1 === single[end + 1].charCodeAt(1)) {\n end += 1;\n }\n var count = 1 + end - start;\n // We want to replace at least three characters\n if (count < 3)\n continue;\n single.splice(start, count, single[start] + \"-\" + single[end]);\n }\n multiple.unshift(\"[\" + single.join(\"\") + \"]\");\n return new RegExp(multiple.join(\"|\"), \"g\");\n}\n// /[^\\0-\\x7F]/gu\nvar reNonASCII = /(?:[\\x80-\\uD7FF\\uE000-\\uFFFF]|[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]|[\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?:[^\\uD800-\\uDBFF]|^)[\\uDC00-\\uDFFF])/g;\nvar getCodePoint = \n// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition\nString.prototype.codePointAt != null\n ? // eslint-disable-next-line @typescript-eslint/no-non-null-assertion\n function (str) { return str.codePointAt(0); }\n : // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae\n function (c) {\n return (c.charCodeAt(0) - 0xd800) * 0x400 +\n c.charCodeAt(1) -\n 0xdc00 +\n 0x10000;\n };\nfunction singleCharReplacer(c) {\n return \"&#x\" + (c.length > 1 ? 
getCodePoint(c) : c.charCodeAt(0))\n .toString(16)\n .toUpperCase() + \";\";\n}\nfunction getInverse(inverse, re) {\n return function (data) {\n return data\n .replace(re, function (name) { return inverse[name]; })\n .replace(reNonASCII, singleCharReplacer);\n };\n}\nvar reEscapeChars = new RegExp(xmlReplacer.source + \"|\" + reNonASCII.source, \"g\");\n/**\n * Encodes all non-ASCII characters, as well as characters not valid in XML\n * documents using numeric hexadecimal reference (eg. `ü`).\n *\n * Have a look at `escapeUTF8` if you want a more concise output at the expense\n * of reduced transportability.\n *\n * @param data String to escape.\n */\nfunction escape(data) {\n return data.replace(reEscapeChars, singleCharReplacer);\n}\nexports.escape = escape;\n/**\n * Encodes all characters not valid in XML documents using numeric hexadecimal\n * reference (eg. `ü`).\n *\n * Note that the output will be character-set dependent.\n *\n * @param data String to escape.\n */\nfunction escapeUTF8(data) {\n return data.replace(xmlReplacer, singleCharReplacer);\n}\nexports.escapeUTF8 = escapeUTF8;\nfunction getASCIIEncoder(obj) {\n return function (data) {\n return data.replace(reEscapeChars, function (c) { return obj[c] || singleCharReplacer(c); });\n };\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/entities/lib/encode.js?"); + +/***/ }), + +/***/ "./node_modules/entities/lib/index.js": +/*!********************************************!*\ + !*** ./node_modules/entities/lib/index.js ***! + \********************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +"use strict"; +eval("\nObject.defineProperty(exports, \"__esModule\", ({ value: true }));\nexports.decodeXMLStrict = exports.decodeHTML5Strict = exports.decodeHTML4Strict = exports.decodeHTML5 = exports.decodeHTML4 = exports.decodeHTMLStrict = exports.decodeHTML = exports.decodeXML = exports.encodeHTML5 = exports.encodeHTML4 = exports.escapeUTF8 = exports.escape = exports.encodeNonAsciiHTML = exports.encodeHTML = exports.encodeXML = exports.encode = exports.decodeStrict = exports.decode = void 0;\nvar decode_1 = __webpack_require__(/*! ./decode */ \"./node_modules/entities/lib/decode.js\");\nvar encode_1 = __webpack_require__(/*! ./encode */ \"./node_modules/entities/lib/encode.js\");\n/**\n * Decodes a string with entities.\n *\n * @param data String to decode.\n * @param level Optional level to decode at. 0 = XML, 1 = HTML. Default is 0.\n * @deprecated Use `decodeXML` or `decodeHTML` directly.\n */\nfunction decode(data, level) {\n return (!level || level <= 0 ? decode_1.decodeXML : decode_1.decodeHTML)(data);\n}\nexports.decode = decode;\n/**\n * Decodes a string with entities. Does not allow missing trailing semicolons for entities.\n *\n * @param data String to decode.\n * @param level Optional level to decode at. 0 = XML, 1 = HTML. Default is 0.\n * @deprecated Use `decodeHTMLStrict` or `decodeXML` directly.\n */\nfunction decodeStrict(data, level) {\n return (!level || level <= 0 ? decode_1.decodeXML : decode_1.decodeHTMLStrict)(data);\n}\nexports.decodeStrict = decodeStrict;\n/**\n * Encodes a string with entities.\n *\n * @param data String to encode.\n * @param level Optional level to encode at. 0 = XML, 1 = HTML. Default is 0.\n * @deprecated Use `encodeHTML`, `encodeXML` or `encodeNonAsciiHTML` directly.\n */\nfunction encode(data, level) {\n return (!level || level <= 0 ? 
encode_1.encodeXML : encode_1.encodeHTML)(data);\n}\nexports.encode = encode;\nvar encode_2 = __webpack_require__(/*! ./encode */ \"./node_modules/entities/lib/encode.js\");\nObject.defineProperty(exports, \"encodeXML\", ({ enumerable: true, get: function () { return encode_2.encodeXML; } }));\nObject.defineProperty(exports, \"encodeHTML\", ({ enumerable: true, get: function () { return encode_2.encodeHTML; } }));\nObject.defineProperty(exports, \"encodeNonAsciiHTML\", ({ enumerable: true, get: function () { return encode_2.encodeNonAsciiHTML; } }));\nObject.defineProperty(exports, \"escape\", ({ enumerable: true, get: function () { return encode_2.escape; } }));\nObject.defineProperty(exports, \"escapeUTF8\", ({ enumerable: true, get: function () { return encode_2.escapeUTF8; } }));\n// Legacy aliases (deprecated)\nObject.defineProperty(exports, \"encodeHTML4\", ({ enumerable: true, get: function () { return encode_2.encodeHTML; } }));\nObject.defineProperty(exports, \"encodeHTML5\", ({ enumerable: true, get: function () { return encode_2.encodeHTML; } }));\nvar decode_2 = __webpack_require__(/*! ./decode */ \"./node_modules/entities/lib/decode.js\");\nObject.defineProperty(exports, \"decodeXML\", ({ enumerable: true, get: function () { return decode_2.decodeXML; } }));\nObject.defineProperty(exports, \"decodeHTML\", ({ enumerable: true, get: function () { return decode_2.decodeHTML; } }));\nObject.defineProperty(exports, \"decodeHTMLStrict\", ({ enumerable: true, get: function () { return decode_2.decodeHTMLStrict; } }));\n// Legacy aliases (deprecated)\nObject.defineProperty(exports, \"decodeHTML4\", ({ enumerable: true, get: function () { return decode_2.decodeHTML; } }));\nObject.defineProperty(exports, \"decodeHTML5\", ({ enumerable: true, get: function () { return decode_2.decodeHTML; } }));\nObject.defineProperty(exports, \"decodeHTML4Strict\", ({ enumerable: true, get: function () { return decode_2.decodeHTMLStrict; } }));\nObject.defineProperty(exports, \"decodeHTML5Strict\", ({ enumerable: true, get: function () { return decode_2.decodeHTMLStrict; } }));\nObject.defineProperty(exports, \"decodeXMLStrict\", ({ enumerable: true, get: function () { return decode_2.decodeXML; } }));\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/entities/lib/index.js?"); + +/***/ }), + +/***/ "./node_modules/events/events.js": +/*!***************************************!*\ + !*** ./node_modules/events/events.js ***! + \***************************************/ +/***/ ((module) => { + +"use strict"; +eval("// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\nvar R = typeof Reflect === 'object' ? Reflect : null\nvar ReflectApply = R && typeof R.apply === 'function'\n ? R.apply\n : function ReflectApply(target, receiver, args) {\n return Function.prototype.apply.call(target, receiver, args);\n }\n\nvar ReflectOwnKeys\nif (R && typeof R.ownKeys === 'function') {\n ReflectOwnKeys = R.ownKeys\n} else if (Object.getOwnPropertySymbols) {\n ReflectOwnKeys = function ReflectOwnKeys(target) {\n return Object.getOwnPropertyNames(target)\n .concat(Object.getOwnPropertySymbols(target));\n };\n} else {\n ReflectOwnKeys = function ReflectOwnKeys(target) {\n return Object.getOwnPropertyNames(target);\n };\n}\n\nfunction ProcessEmitWarning(warning) {\n if (console && console.warn) console.warn(warning);\n}\n\nvar NumberIsNaN = Number.isNaN || function NumberIsNaN(value) {\n return value !== value;\n}\n\nfunction EventEmitter() {\n EventEmitter.init.call(this);\n}\nmodule.exports = EventEmitter;\nmodule.exports.once = once;\n\n// Backwards-compat with node 0.10.x\nEventEmitter.EventEmitter = EventEmitter;\n\nEventEmitter.prototype._events = undefined;\nEventEmitter.prototype._eventsCount = 0;\nEventEmitter.prototype._maxListeners = undefined;\n\n// By default EventEmitters will print a warning if more than 10 listeners are\n// added to it. This is a useful default which helps finding memory leaks.\nvar defaultMaxListeners = 10;\n\nfunction checkListener(listener) {\n if (typeof listener !== 'function') {\n throw new TypeError('The \"listener\" argument must be of type Function. Received type ' + typeof listener);\n }\n}\n\nObject.defineProperty(EventEmitter, 'defaultMaxListeners', {\n enumerable: true,\n get: function() {\n return defaultMaxListeners;\n },\n set: function(arg) {\n if (typeof arg !== 'number' || arg < 0 || NumberIsNaN(arg)) {\n throw new RangeError('The value of \"defaultMaxListeners\" is out of range. It must be a non-negative number. Received ' + arg + '.');\n }\n defaultMaxListeners = arg;\n }\n});\n\nEventEmitter.init = function() {\n\n if (this._events === undefined ||\n this._events === Object.getPrototypeOf(this)._events) {\n this._events = Object.create(null);\n this._eventsCount = 0;\n }\n\n this._maxListeners = this._maxListeners || undefined;\n};\n\n// Obviously not all Emitters should be limited to 10. This function allows\n// that to be increased. Set to zero for unlimited.\nEventEmitter.prototype.setMaxListeners = function setMaxListeners(n) {\n if (typeof n !== 'number' || n < 0 || NumberIsNaN(n)) {\n throw new RangeError('The value of \"n\" is out of range. It must be a non-negative number. 
Received ' + n + '.');\n }\n this._maxListeners = n;\n return this;\n};\n\nfunction _getMaxListeners(that) {\n if (that._maxListeners === undefined)\n return EventEmitter.defaultMaxListeners;\n return that._maxListeners;\n}\n\nEventEmitter.prototype.getMaxListeners = function getMaxListeners() {\n return _getMaxListeners(this);\n};\n\nEventEmitter.prototype.emit = function emit(type) {\n var args = [];\n for (var i = 1; i < arguments.length; i++) args.push(arguments[i]);\n var doError = (type === 'error');\n\n var events = this._events;\n if (events !== undefined)\n doError = (doError && events.error === undefined);\n else if (!doError)\n return false;\n\n // If there is no 'error' event listener then throw.\n if (doError) {\n var er;\n if (args.length > 0)\n er = args[0];\n if (er instanceof Error) {\n // Note: The comments on the `throw` lines are intentional, they show\n // up in Node's output if this results in an unhandled exception.\n throw er; // Unhandled 'error' event\n }\n // At least give some kind of context to the user\n var err = new Error('Unhandled error.' + (er ? ' (' + er.message + ')' : ''));\n err.context = er;\n throw err; // Unhandled 'error' event\n }\n\n var handler = events[type];\n\n if (handler === undefined)\n return false;\n\n if (typeof handler === 'function') {\n ReflectApply(handler, this, args);\n } else {\n var len = handler.length;\n var listeners = arrayClone(handler, len);\n for (var i = 0; i < len; ++i)\n ReflectApply(listeners[i], this, args);\n }\n\n return true;\n};\n\nfunction _addListener(target, type, listener, prepend) {\n var m;\n var events;\n var existing;\n\n checkListener(listener);\n\n events = target._events;\n if (events === undefined) {\n events = target._events = Object.create(null);\n target._eventsCount = 0;\n } else {\n // To avoid recursion in the case that type === \"newListener\"! Before\n // adding it to the listeners, first emit \"newListener\".\n if (events.newListener !== undefined) {\n target.emit('newListener', type,\n listener.listener ? listener.listener : listener);\n\n // Re-assign `events` because a newListener handler could have caused the\n // this._events to be assigned to a new object\n events = target._events;\n }\n existing = events[type];\n }\n\n if (existing === undefined) {\n // Optimize the case of one listener. Don't need the extra array object.\n existing = events[type] = listener;\n ++target._eventsCount;\n } else {\n if (typeof existing === 'function') {\n // Adding the second element, need to change to array.\n existing = events[type] =\n prepend ? [listener, existing] : [existing, listener];\n // If we've already got an array, just append.\n } else if (prepend) {\n existing.unshift(listener);\n } else {\n existing.push(listener);\n }\n\n // Check for listener leak\n m = _getMaxListeners(target);\n if (m > 0 && existing.length > m && !existing.warned) {\n existing.warned = true;\n // No error code for this since it is a Warning\n // eslint-disable-next-line no-restricted-syntax\n var w = new Error('Possible EventEmitter memory leak detected. ' +\n existing.length + ' ' + String(type) + ' listeners ' +\n 'added. 
Use emitter.setMaxListeners() to ' +\n 'increase limit');\n w.name = 'MaxListenersExceededWarning';\n w.emitter = target;\n w.type = type;\n w.count = existing.length;\n ProcessEmitWarning(w);\n }\n }\n\n return target;\n}\n\nEventEmitter.prototype.addListener = function addListener(type, listener) {\n return _addListener(this, type, listener, false);\n};\n\nEventEmitter.prototype.on = EventEmitter.prototype.addListener;\n\nEventEmitter.prototype.prependListener =\n function prependListener(type, listener) {\n return _addListener(this, type, listener, true);\n };\n\nfunction onceWrapper() {\n if (!this.fired) {\n this.target.removeListener(this.type, this.wrapFn);\n this.fired = true;\n if (arguments.length === 0)\n return this.listener.call(this.target);\n return this.listener.apply(this.target, arguments);\n }\n}\n\nfunction _onceWrap(target, type, listener) {\n var state = { fired: false, wrapFn: undefined, target: target, type: type, listener: listener };\n var wrapped = onceWrapper.bind(state);\n wrapped.listener = listener;\n state.wrapFn = wrapped;\n return wrapped;\n}\n\nEventEmitter.prototype.once = function once(type, listener) {\n checkListener(listener);\n this.on(type, _onceWrap(this, type, listener));\n return this;\n};\n\nEventEmitter.prototype.prependOnceListener =\n function prependOnceListener(type, listener) {\n checkListener(listener);\n this.prependListener(type, _onceWrap(this, type, listener));\n return this;\n };\n\n// Emits a 'removeListener' event if and only if the listener was removed.\nEventEmitter.prototype.removeListener =\n function removeListener(type, listener) {\n var list, events, position, i, originalListener;\n\n checkListener(listener);\n\n events = this._events;\n if (events === undefined)\n return this;\n\n list = events[type];\n if (list === undefined)\n return this;\n\n if (list === listener || list.listener === listener) {\n if (--this._eventsCount === 0)\n this._events = Object.create(null);\n else {\n delete events[type];\n if (events.removeListener)\n this.emit('removeListener', type, list.listener || listener);\n }\n } else if (typeof list !== 'function') {\n position = -1;\n\n for (i = list.length - 1; i >= 0; i--) {\n if (list[i] === listener || list[i].listener === listener) {\n originalListener = list[i].listener;\n position = i;\n break;\n }\n }\n\n if (position < 0)\n return this;\n\n if (position === 0)\n list.shift();\n else {\n spliceOne(list, position);\n }\n\n if (list.length === 1)\n events[type] = list[0];\n\n if (events.removeListener !== undefined)\n this.emit('removeListener', type, originalListener || listener);\n }\n\n return this;\n };\n\nEventEmitter.prototype.off = EventEmitter.prototype.removeListener;\n\nEventEmitter.prototype.removeAllListeners =\n function removeAllListeners(type) {\n var listeners, events, i;\n\n events = this._events;\n if (events === undefined)\n return this;\n\n // not listening for removeListener, no need to emit\n if (events.removeListener === undefined) {\n if (arguments.length === 0) {\n this._events = Object.create(null);\n this._eventsCount = 0;\n } else if (events[type] !== undefined) {\n if (--this._eventsCount === 0)\n this._events = Object.create(null);\n else\n delete events[type];\n }\n return this;\n }\n\n // emit removeListener for all listeners on all events\n if (arguments.length === 0) {\n var keys = Object.keys(events);\n var key;\n for (i = 0; i < keys.length; ++i) {\n key = keys[i];\n if (key === 'removeListener') continue;\n this.removeAllListeners(key);\n }\n 
this.removeAllListeners('removeListener');\n this._events = Object.create(null);\n this._eventsCount = 0;\n return this;\n }\n\n listeners = events[type];\n\n if (typeof listeners === 'function') {\n this.removeListener(type, listeners);\n } else if (listeners !== undefined) {\n // LIFO order\n for (i = listeners.length - 1; i >= 0; i--) {\n this.removeListener(type, listeners[i]);\n }\n }\n\n return this;\n };\n\nfunction _listeners(target, type, unwrap) {\n var events = target._events;\n\n if (events === undefined)\n return [];\n\n var evlistener = events[type];\n if (evlistener === undefined)\n return [];\n\n if (typeof evlistener === 'function')\n return unwrap ? [evlistener.listener || evlistener] : [evlistener];\n\n return unwrap ?\n unwrapListeners(evlistener) : arrayClone(evlistener, evlistener.length);\n}\n\nEventEmitter.prototype.listeners = function listeners(type) {\n return _listeners(this, type, true);\n};\n\nEventEmitter.prototype.rawListeners = function rawListeners(type) {\n return _listeners(this, type, false);\n};\n\nEventEmitter.listenerCount = function(emitter, type) {\n if (typeof emitter.listenerCount === 'function') {\n return emitter.listenerCount(type);\n } else {\n return listenerCount.call(emitter, type);\n }\n};\n\nEventEmitter.prototype.listenerCount = listenerCount;\nfunction listenerCount(type) {\n var events = this._events;\n\n if (events !== undefined) {\n var evlistener = events[type];\n\n if (typeof evlistener === 'function') {\n return 1;\n } else if (evlistener !== undefined) {\n return evlistener.length;\n }\n }\n\n return 0;\n}\n\nEventEmitter.prototype.eventNames = function eventNames() {\n return this._eventsCount > 0 ? ReflectOwnKeys(this._events) : [];\n};\n\nfunction arrayClone(arr, n) {\n var copy = new Array(n);\n for (var i = 0; i < n; ++i)\n copy[i] = arr[i];\n return copy;\n}\n\nfunction spliceOne(list, index) {\n for (; index + 1 < list.length; index++)\n list[index] = list[index + 1];\n list.pop();\n}\n\nfunction unwrapListeners(arr) {\n var ret = new Array(arr.length);\n for (var i = 0; i < ret.length; ++i) {\n ret[i] = arr[i].listener || arr[i];\n }\n return ret;\n}\n\nfunction once(emitter, name) {\n return new Promise(function (resolve, reject) {\n function errorListener(err) {\n emitter.removeListener(name, resolver);\n reject(err);\n }\n\n function resolver() {\n if (typeof emitter.removeListener === 'function') {\n emitter.removeListener('error', errorListener);\n }\n resolve([].slice.call(arguments));\n };\n\n eventTargetAgnosticAddListener(emitter, name, resolver, { once: true });\n if (name !== 'error') {\n addErrorHandlerIfEventEmitter(emitter, errorListener, { once: true });\n }\n });\n}\n\nfunction addErrorHandlerIfEventEmitter(emitter, handler, flags) {\n if (typeof emitter.on === 'function') {\n eventTargetAgnosticAddListener(emitter, 'error', handler, flags);\n }\n}\n\nfunction eventTargetAgnosticAddListener(emitter, name, listener, flags) {\n if (typeof emitter.on === 'function') {\n if (flags.once) {\n emitter.once(name, listener);\n } else {\n emitter.on(name, listener);\n }\n } else if (typeof emitter.addEventListener === 'function') {\n // EventTarget does not have `error` event semantics like Node\n // EventEmitters, we do not listen for `error` events here.\n emitter.addEventListener(name, function wrapListener(arg) {\n // IE does not have builtin `{ once: true }` support so we\n // have to do it manually.\n if (flags.once) {\n emitter.removeEventListener(name, wrapListener);\n }\n listener(arg);\n });\n } else {\n 
throw new TypeError('The \"emitter\" argument must be of type EventEmitter. Received type ' + typeof emitter);\n }\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/events/events.js?"); + +/***/ }), + +/***/ "./node_modules/ieee754/index.js": +/*!***************************************!*\ + !*** ./node_modules/ieee754/index.js ***! + \***************************************/ +/***/ ((__unused_webpack_module, exports) => { + +eval("/*! ieee754. BSD-3-Clause License. Feross Aboukhadijeh */\nexports.read = function (buffer, offset, isLE, mLen, nBytes) {\n var e, m\n var eLen = (nBytes * 8) - mLen - 1\n var eMax = (1 << eLen) - 1\n var eBias = eMax >> 1\n var nBits = -7\n var i = isLE ? (nBytes - 1) : 0\n var d = isLE ? -1 : 1\n var s = buffer[offset + i]\n\n i += d\n\n e = s & ((1 << (-nBits)) - 1)\n s >>= (-nBits)\n nBits += eLen\n for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {}\n\n m = e & ((1 << (-nBits)) - 1)\n e >>= (-nBits)\n nBits += mLen\n for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {}\n\n if (e === 0) {\n e = 1 - eBias\n } else if (e === eMax) {\n return m ? NaN : ((s ? -1 : 1) * Infinity)\n } else {\n m = m + Math.pow(2, mLen)\n e = e - eBias\n }\n return (s ? -1 : 1) * m * Math.pow(2, e - mLen)\n}\n\nexports.write = function (buffer, value, offset, isLE, mLen, nBytes) {\n var e, m, c\n var eLen = (nBytes * 8) - mLen - 1\n var eMax = (1 << eLen) - 1\n var eBias = eMax >> 1\n var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)\n var i = isLE ? 0 : (nBytes - 1)\n var d = isLE ? 1 : -1\n var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0\n\n value = Math.abs(value)\n\n if (isNaN(value) || value === Infinity) {\n m = isNaN(value) ? 1 : 0\n e = eMax\n } else {\n e = Math.floor(Math.log(value) / Math.LN2)\n if (value * (c = Math.pow(2, -e)) < 1) {\n e--\n c *= 2\n }\n if (e + eBias >= 1) {\n value += rt / c\n } else {\n value += rt * Math.pow(2, 1 - eBias)\n }\n if (value * c >= 2) {\n e++\n c /= 2\n }\n\n if (e + eBias >= eMax) {\n m = 0\n e = eMax\n } else if (e + eBias >= 1) {\n m = ((value * c) - 1) * Math.pow(2, mLen)\n e = e + eBias\n } else {\n m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen)\n e = 0\n }\n }\n\n for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}\n\n e = (e << mLen) | m\n eLen += mLen\n for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}\n\n buffer[offset + i - d] |= s * 128\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/ieee754/index.js?"); + +/***/ }), + +/***/ "./node_modules/inherits/inherits_browser.js": +/*!***************************************************!*\ + !*** ./node_modules/inherits/inherits_browser.js ***! 
+ \***************************************************/ +/***/ ((module) => { + +eval("if (typeof Object.create === 'function') {\n // implementation from standard node.js 'util' module\n module.exports = function inherits(ctor, superCtor) {\n if (superCtor) {\n ctor.super_ = superCtor\n ctor.prototype = Object.create(superCtor.prototype, {\n constructor: {\n value: ctor,\n enumerable: false,\n writable: true,\n configurable: true\n }\n })\n }\n };\n} else {\n // old school shim for old browsers\n module.exports = function inherits(ctor, superCtor) {\n if (superCtor) {\n ctor.super_ = superCtor\n var TempCtor = function () {}\n TempCtor.prototype = superCtor.prototype\n ctor.prototype = new TempCtor()\n ctor.prototype.constructor = ctor\n }\n }\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/inherits/inherits_browser.js?"); + +/***/ }), + +/***/ "./node_modules/process/browser.js": +/*!*****************************************!*\ + !*** ./node_modules/process/browser.js ***! + \*****************************************/ +/***/ ((module) => { + +eval("// shim for using process in browser\nvar process = module.exports = {};\n\n// cached from whatever global is present so that test runners that stub it\n// don't break things. But we need to wrap it in a try catch in case it is\n// wrapped in strict mode code which doesn't define any globals. It's inside a\n// function because try/catches deoptimize in certain engines.\n\nvar cachedSetTimeout;\nvar cachedClearTimeout;\n\nfunction defaultSetTimout() {\n throw new Error('setTimeout has not been defined');\n}\nfunction defaultClearTimeout () {\n throw new Error('clearTimeout has not been defined');\n}\n(function () {\n try {\n if (typeof setTimeout === 'function') {\n cachedSetTimeout = setTimeout;\n } else {\n cachedSetTimeout = defaultSetTimout;\n }\n } catch (e) {\n cachedSetTimeout = defaultSetTimout;\n }\n try {\n if (typeof clearTimeout === 'function') {\n cachedClearTimeout = clearTimeout;\n } else {\n cachedClearTimeout = defaultClearTimeout;\n }\n } catch (e) {\n cachedClearTimeout = defaultClearTimeout;\n }\n} ())\nfunction runTimeout(fun) {\n if (cachedSetTimeout === setTimeout) {\n //normal enviroments in sane situations\n return setTimeout(fun, 0);\n }\n // if setTimeout wasn't available but was latter defined\n if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) {\n cachedSetTimeout = setTimeout;\n return setTimeout(fun, 0);\n }\n try {\n // when when somebody has screwed with setTimeout but no I.E. maddness\n return cachedSetTimeout(fun, 0);\n } catch(e){\n try {\n // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally\n return cachedSetTimeout.call(null, fun, 0);\n } catch(e){\n // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error\n return cachedSetTimeout.call(this, fun, 0);\n }\n }\n\n\n}\nfunction runClearTimeout(marker) {\n if (cachedClearTimeout === clearTimeout) {\n //normal enviroments in sane situations\n return clearTimeout(marker);\n }\n // if clearTimeout wasn't available but was latter defined\n if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) {\n cachedClearTimeout = clearTimeout;\n return clearTimeout(marker);\n }\n try {\n // when when somebody has screwed with setTimeout but no I.E. 
maddness\n return cachedClearTimeout(marker);\n } catch (e){\n try {\n // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally\n return cachedClearTimeout.call(null, marker);\n } catch (e){\n // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error.\n // Some versions of I.E. have different rules for clearTimeout vs setTimeout\n return cachedClearTimeout.call(this, marker);\n }\n }\n\n\n\n}\nvar queue = [];\nvar draining = false;\nvar currentQueue;\nvar queueIndex = -1;\n\nfunction cleanUpNextTick() {\n if (!draining || !currentQueue) {\n return;\n }\n draining = false;\n if (currentQueue.length) {\n queue = currentQueue.concat(queue);\n } else {\n queueIndex = -1;\n }\n if (queue.length) {\n drainQueue();\n }\n}\n\nfunction drainQueue() {\n if (draining) {\n return;\n }\n var timeout = runTimeout(cleanUpNextTick);\n draining = true;\n\n var len = queue.length;\n while(len) {\n currentQueue = queue;\n queue = [];\n while (++queueIndex < len) {\n if (currentQueue) {\n currentQueue[queueIndex].run();\n }\n }\n queueIndex = -1;\n len = queue.length;\n }\n currentQueue = null;\n draining = false;\n runClearTimeout(timeout);\n}\n\nprocess.nextTick = function (fun) {\n var args = new Array(arguments.length - 1);\n if (arguments.length > 1) {\n for (var i = 1; i < arguments.length; i++) {\n args[i - 1] = arguments[i];\n }\n }\n queue.push(new Item(fun, args));\n if (queue.length === 1 && !draining) {\n runTimeout(drainQueue);\n }\n};\n\n// v8 likes predictible objects\nfunction Item(fun, array) {\n this.fun = fun;\n this.array = array;\n}\nItem.prototype.run = function () {\n this.fun.apply(null, this.array);\n};\nprocess.title = 'browser';\nprocess.browser = true;\nprocess.env = {};\nprocess.argv = [];\nprocess.version = ''; // empty string to avoid regexp issues\nprocess.versions = {};\n\nfunction noop() {}\n\nprocess.on = noop;\nprocess.addListener = noop;\nprocess.once = noop;\nprocess.off = noop;\nprocess.removeListener = noop;\nprocess.removeAllListeners = noop;\nprocess.emit = noop;\nprocess.prependListener = noop;\nprocess.prependOnceListener = noop;\n\nprocess.listeners = function (name) { return [] }\n\nprocess.binding = function (name) {\n throw new Error('process.binding is not supported');\n};\n\nprocess.cwd = function () { return '/' };\nprocess.chdir = function (dir) {\n throw new Error('process.chdir is not supported');\n};\nprocess.umask = function() { return 0; };\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/process/browser.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/errors-browser.js": +/*!********************************************************!*\ + !*** ./node_modules/readable-stream/errors-browser.js ***! 
+ \********************************************************/ +/***/ ((module) => { + +"use strict"; +eval("\n\nfunction _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; }\n\nvar codes = {};\n\nfunction createErrorType(code, message, Base) {\n if (!Base) {\n Base = Error;\n }\n\n function getMessage(arg1, arg2, arg3) {\n if (typeof message === 'string') {\n return message;\n } else {\n return message(arg1, arg2, arg3);\n }\n }\n\n var NodeError =\n /*#__PURE__*/\n function (_Base) {\n _inheritsLoose(NodeError, _Base);\n\n function NodeError(arg1, arg2, arg3) {\n return _Base.call(this, getMessage(arg1, arg2, arg3)) || this;\n }\n\n return NodeError;\n }(Base);\n\n NodeError.prototype.name = Base.name;\n NodeError.prototype.code = code;\n codes[code] = NodeError;\n} // https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js\n\n\nfunction oneOf(expected, thing) {\n if (Array.isArray(expected)) {\n var len = expected.length;\n expected = expected.map(function (i) {\n return String(i);\n });\n\n if (len > 2) {\n return \"one of \".concat(thing, \" \").concat(expected.slice(0, len - 1).join(', '), \", or \") + expected[len - 1];\n } else if (len === 2) {\n return \"one of \".concat(thing, \" \").concat(expected[0], \" or \").concat(expected[1]);\n } else {\n return \"of \".concat(thing, \" \").concat(expected[0]);\n }\n } else {\n return \"of \".concat(thing, \" \").concat(String(expected));\n }\n} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith\n\n\nfunction startsWith(str, search, pos) {\n return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search;\n} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith\n\n\nfunction endsWith(str, search, this_len) {\n if (this_len === undefined || this_len > str.length) {\n this_len = str.length;\n }\n\n return str.substring(this_len - search.length, this_len) === search;\n} // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes\n\n\nfunction includes(str, search, start) {\n if (typeof start !== 'number') {\n start = 0;\n }\n\n if (start + search.length > str.length) {\n return false;\n } else {\n return str.indexOf(search, start) !== -1;\n }\n}\n\ncreateErrorType('ERR_INVALID_OPT_VALUE', function (name, value) {\n return 'The value \"' + value + '\" is invalid for option \"' + name + '\"';\n}, TypeError);\ncreateErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) {\n // determiner: 'must be' or 'must not be'\n var determiner;\n\n if (typeof expected === 'string' && startsWith(expected, 'not ')) {\n determiner = 'must not be';\n expected = expected.replace(/^not /, '');\n } else {\n determiner = 'must be';\n }\n\n var msg;\n\n if (endsWith(name, ' argument')) {\n // For cases like 'first argument'\n msg = \"The \".concat(name, \" \").concat(determiner, \" \").concat(oneOf(expected, 'type'));\n } else {\n var type = includes(name, '.') ? 'property' : 'argument';\n msg = \"The \\\"\".concat(name, \"\\\" \").concat(type, \" \").concat(determiner, \" \").concat(oneOf(expected, 'type'));\n }\n\n msg += \". 
Received type \".concat(typeof actual);\n return msg;\n}, TypeError);\ncreateErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF');\ncreateErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) {\n return 'The ' + name + ' method is not implemented';\n});\ncreateErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close');\ncreateErrorType('ERR_STREAM_DESTROYED', function (name) {\n return 'Cannot call ' + name + ' after a stream was destroyed';\n});\ncreateErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times');\ncreateErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable');\ncreateErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end');\ncreateErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError);\ncreateErrorType('ERR_UNKNOWN_ENCODING', function (arg) {\n return 'Unknown encoding: ' + arg;\n}, TypeError);\ncreateErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event');\nmodule.exports.codes = codes;\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/errors-browser.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/_stream_duplex.js": +/*!************************************************************!*\ + !*** ./node_modules/readable-stream/lib/_stream_duplex.js ***! + \************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("/* provided dependency */ var process = __webpack_require__(/*! process/browser */ \"./node_modules/process/browser.js\");\n// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n// a duplex stream is just a stream that is both readable and writable.\n// Since JS doesn't have multiple prototypal inheritance, this class\n// prototypally inherits from Readable, and then parasitically from\n// Writable.\n\n\n\n/**/\nvar objectKeys = Object.keys || function (obj) {\n var keys = [];\n for (var key in obj) keys.push(key);\n return keys;\n};\n/**/\n\nmodule.exports = Duplex;\nvar Readable = __webpack_require__(/*! ./_stream_readable */ \"./node_modules/readable-stream/lib/_stream_readable.js\");\nvar Writable = __webpack_require__(/*! ./_stream_writable */ \"./node_modules/readable-stream/lib/_stream_writable.js\");\n__webpack_require__(/*! 
inherits */ \"./node_modules/inherits/inherits_browser.js\")(Duplex, Readable);\n{\n // Allow the keys array to be GC'ed.\n var keys = objectKeys(Writable.prototype);\n for (var v = 0; v < keys.length; v++) {\n var method = keys[v];\n if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method];\n }\n}\nfunction Duplex(options) {\n if (!(this instanceof Duplex)) return new Duplex(options);\n Readable.call(this, options);\n Writable.call(this, options);\n this.allowHalfOpen = true;\n if (options) {\n if (options.readable === false) this.readable = false;\n if (options.writable === false) this.writable = false;\n if (options.allowHalfOpen === false) {\n this.allowHalfOpen = false;\n this.once('end', onend);\n }\n }\n}\nObject.defineProperty(Duplex.prototype, 'writableHighWaterMark', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._writableState.highWaterMark;\n }\n});\nObject.defineProperty(Duplex.prototype, 'writableBuffer', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._writableState && this._writableState.getBuffer();\n }\n});\nObject.defineProperty(Duplex.prototype, 'writableLength', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._writableState.length;\n }\n});\n\n// the no-half-open enforcer\nfunction onend() {\n // If the writable side ended, then we're ok.\n if (this._writableState.ended) return;\n\n // no more data can be written.\n // But allow more writes to happen in this tick.\n process.nextTick(onEndNT, this);\n}\nfunction onEndNT(self) {\n self.end();\n}\nObject.defineProperty(Duplex.prototype, 'destroyed', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n if (this._readableState === undefined || this._writableState === undefined) {\n return false;\n }\n return this._readableState.destroyed && this._writableState.destroyed;\n },\n set: function set(value) {\n // we ignore the value if the stream\n // has not been initialized yet\n if (this._readableState === undefined || this._writableState === undefined) {\n return;\n }\n\n // backward compatibility, the user is explicitly\n // managing destroyed\n this._readableState.destroyed = value;\n this._writableState.destroyed = value;\n }\n});\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/_stream_duplex.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/_stream_passthrough.js": +/*!*****************************************************************!*\ + !*** ./node_modules/readable-stream/lib/_stream_passthrough.js ***! + \*****************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("// Copyright Joyent, Inc. 
and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n// a passthrough stream.\n// basically just the most minimal sort of Transform stream.\n// Every written chunk gets output as-is.\n\n\n\nmodule.exports = PassThrough;\nvar Transform = __webpack_require__(/*! ./_stream_transform */ \"./node_modules/readable-stream/lib/_stream_transform.js\");\n__webpack_require__(/*! inherits */ \"./node_modules/inherits/inherits_browser.js\")(PassThrough, Transform);\nfunction PassThrough(options) {\n if (!(this instanceof PassThrough)) return new PassThrough(options);\n Transform.call(this, options);\n}\nPassThrough.prototype._transform = function (chunk, encoding, cb) {\n cb(null, chunk);\n};\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/_stream_passthrough.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/_stream_readable.js": +/*!**************************************************************!*\ + !*** ./node_modules/readable-stream/lib/_stream_readable.js ***! + \**************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("/* provided dependency */ var process = __webpack_require__(/*! process/browser */ \"./node_modules/process/browser.js\");\n// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\nmodule.exports = Readable;\n\n/**/\nvar Duplex;\n/**/\n\nReadable.ReadableState = ReadableState;\n\n/**/\nvar EE = (__webpack_require__(/*! events */ \"./node_modules/events/events.js\").EventEmitter);\nvar EElistenerCount = function EElistenerCount(emitter, type) {\n return emitter.listeners(type).length;\n};\n/**/\n\n/**/\nvar Stream = __webpack_require__(/*! ./internal/streams/stream */ \"./node_modules/readable-stream/lib/internal/streams/stream-browser.js\");\n/**/\n\nvar Buffer = (__webpack_require__(/*! buffer */ \"./node_modules/buffer/index.js\").Buffer);\nvar OurUint8Array = (typeof __webpack_require__.g !== 'undefined' ? __webpack_require__.g : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? self : {}).Uint8Array || function () {};\nfunction _uint8ArrayToBuffer(chunk) {\n return Buffer.from(chunk);\n}\nfunction _isUint8Array(obj) {\n return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;\n}\n\n/**/\nvar debugUtil = __webpack_require__(/*! util */ \"?d17e\");\nvar debug;\nif (debugUtil && debugUtil.debuglog) {\n debug = debugUtil.debuglog('stream');\n} else {\n debug = function debug() {};\n}\n/**/\n\nvar BufferList = __webpack_require__(/*! ./internal/streams/buffer_list */ \"./node_modules/readable-stream/lib/internal/streams/buffer_list.js\");\nvar destroyImpl = __webpack_require__(/*! ./internal/streams/destroy */ \"./node_modules/readable-stream/lib/internal/streams/destroy.js\");\nvar _require = __webpack_require__(/*! ./internal/streams/state */ \"./node_modules/readable-stream/lib/internal/streams/state.js\"),\n getHighWaterMark = _require.getHighWaterMark;\nvar _require$codes = (__webpack_require__(/*! ../errors */ \"./node_modules/readable-stream/errors-browser.js\").codes),\n ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,\n ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF,\n ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,\n ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT;\n\n// Lazy loaded to improve the startup performance.\nvar StringDecoder;\nvar createReadableStreamAsyncIterator;\nvar from;\n__webpack_require__(/*! inherits */ \"./node_modules/inherits/inherits_browser.js\")(Readable, Stream);\nvar errorOrDestroy = destroyImpl.errorOrDestroy;\nvar kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume'];\nfunction prependListener(emitter, event, fn) {\n // Sadly this is not cacheable as some libraries bundle their own\n // event emitter implementation with them.\n if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn);\n\n // This is a hack to make sure that our error handler is attached before any\n // userland ones. NEVER DO THIS. This is here only because this code needs\n // to continue to work with older versions of Node.js that do not include\n // the prependListener() method. 
The goal is to eventually remove this hack.\n if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]];\n}\nfunction ReadableState(options, stream, isDuplex) {\n Duplex = Duplex || __webpack_require__(/*! ./_stream_duplex */ \"./node_modules/readable-stream/lib/_stream_duplex.js\");\n options = options || {};\n\n // Duplex streams are both readable and writable, but share\n // the same options object.\n // However, some cases require setting options to different\n // values for the readable and the writable sides of the duplex stream.\n // These options can be provided separately as readableXXX and writableXXX.\n if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex;\n\n // object stream flag. Used to make read(n) ignore n and to\n // make all the buffer merging and length checks go away\n this.objectMode = !!options.objectMode;\n if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode;\n\n // the point at which it stops calling _read() to fill the buffer\n // Note: 0 is a valid value, means \"don't call _read preemptively ever\"\n this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex);\n\n // A linked list is used to store data chunks instead of an array because the\n // linked list can remove elements from the beginning faster than\n // array.shift()\n this.buffer = new BufferList();\n this.length = 0;\n this.pipes = null;\n this.pipesCount = 0;\n this.flowing = null;\n this.ended = false;\n this.endEmitted = false;\n this.reading = false;\n\n // a flag to be able to tell if the event 'readable'/'data' is emitted\n // immediately, or on a later tick. We set this to true at first, because\n // any actions that shouldn't happen until \"later\" should generally also\n // not happen before the first read call.\n this.sync = true;\n\n // whenever we return null, then we set a flag to say\n // that we're awaiting a 'readable' event emission.\n this.needReadable = false;\n this.emittedReadable = false;\n this.readableListening = false;\n this.resumeScheduled = false;\n this.paused = true;\n\n // Should close be emitted on destroy. Defaults to true.\n this.emitClose = options.emitClose !== false;\n\n // Should .destroy() be called after 'end' (and potentially 'finish')\n this.autoDestroy = !!options.autoDestroy;\n\n // has it been destroyed\n this.destroyed = false;\n\n // Crypto is kind of old and crusty. Historically, its default string\n // encoding is 'binary' so we have to make this configurable.\n // Everything else in the universe uses 'utf8', though.\n this.defaultEncoding = options.defaultEncoding || 'utf8';\n\n // the number of writers that are awaiting a drain event in .pipe()s\n this.awaitDrain = 0;\n\n // if true, a maybeReadMore has been scheduled\n this.readingMore = false;\n this.decoder = null;\n this.encoding = null;\n if (options.encoding) {\n if (!StringDecoder) StringDecoder = (__webpack_require__(/*! string_decoder/ */ \"./node_modules/string_decoder/lib/string_decoder.js\").StringDecoder);\n this.decoder = new StringDecoder(options.encoding);\n this.encoding = options.encoding;\n }\n}\nfunction Readable(options) {\n Duplex = Duplex || __webpack_require__(/*! 
./_stream_duplex */ \"./node_modules/readable-stream/lib/_stream_duplex.js\");\n if (!(this instanceof Readable)) return new Readable(options);\n\n // Checking for a Stream.Duplex instance is faster here instead of inside\n // the ReadableState constructor, at least with V8 6.5\n var isDuplex = this instanceof Duplex;\n this._readableState = new ReadableState(options, this, isDuplex);\n\n // legacy\n this.readable = true;\n if (options) {\n if (typeof options.read === 'function') this._read = options.read;\n if (typeof options.destroy === 'function') this._destroy = options.destroy;\n }\n Stream.call(this);\n}\nObject.defineProperty(Readable.prototype, 'destroyed', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n if (this._readableState === undefined) {\n return false;\n }\n return this._readableState.destroyed;\n },\n set: function set(value) {\n // we ignore the value if the stream\n // has not been initialized yet\n if (!this._readableState) {\n return;\n }\n\n // backward compatibility, the user is explicitly\n // managing destroyed\n this._readableState.destroyed = value;\n }\n});\nReadable.prototype.destroy = destroyImpl.destroy;\nReadable.prototype._undestroy = destroyImpl.undestroy;\nReadable.prototype._destroy = function (err, cb) {\n cb(err);\n};\n\n// Manually shove something into the read() buffer.\n// This returns true if the highWaterMark has not been hit yet,\n// similar to how Writable.write() returns true if you should\n// write() some more.\nReadable.prototype.push = function (chunk, encoding) {\n var state = this._readableState;\n var skipChunkCheck;\n if (!state.objectMode) {\n if (typeof chunk === 'string') {\n encoding = encoding || state.defaultEncoding;\n if (encoding !== state.encoding) {\n chunk = Buffer.from(chunk, encoding);\n encoding = '';\n }\n skipChunkCheck = true;\n }\n } else {\n skipChunkCheck = true;\n }\n return readableAddChunk(this, chunk, encoding, false, skipChunkCheck);\n};\n\n// Unshift should *always* be something directly out of read()\nReadable.prototype.unshift = function (chunk) {\n return readableAddChunk(this, chunk, null, true, false);\n};\nfunction readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) {\n debug('readableAddChunk', chunk);\n var state = stream._readableState;\n if (chunk === null) {\n state.reading = false;\n onEofChunk(stream, state);\n } else {\n var er;\n if (!skipChunkCheck) er = chunkInvalid(state, chunk);\n if (er) {\n errorOrDestroy(stream, er);\n } else if (state.objectMode || chunk && chunk.length > 0) {\n if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) {\n chunk = _uint8ArrayToBuffer(chunk);\n }\n if (addToFront) {\n if (state.endEmitted) errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true);\n } else if (state.ended) {\n errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF());\n } else if (state.destroyed) {\n return false;\n } else {\n state.reading = false;\n if (state.decoder && !encoding) {\n chunk = state.decoder.write(chunk);\n if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state);\n } else {\n addChunk(stream, state, chunk, false);\n }\n }\n } else if (!addToFront) {\n state.reading = false;\n maybeReadMore(stream, state);\n }\n }\n\n // We can push more data if we are below the 
highWaterMark.\n // Also, if we have no data yet, we can stand some more bytes.\n // This is to work around cases where hwm=0, such as the repl.\n return !state.ended && (state.length < state.highWaterMark || state.length === 0);\n}\nfunction addChunk(stream, state, chunk, addToFront) {\n if (state.flowing && state.length === 0 && !state.sync) {\n state.awaitDrain = 0;\n stream.emit('data', chunk);\n } else {\n // update the buffer info.\n state.length += state.objectMode ? 1 : chunk.length;\n if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk);\n if (state.needReadable) emitReadable(stream);\n }\n maybeReadMore(stream, state);\n}\nfunction chunkInvalid(state, chunk) {\n var er;\n if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {\n er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk);\n }\n return er;\n}\nReadable.prototype.isPaused = function () {\n return this._readableState.flowing === false;\n};\n\n// backwards compatibility.\nReadable.prototype.setEncoding = function (enc) {\n if (!StringDecoder) StringDecoder = (__webpack_require__(/*! string_decoder/ */ \"./node_modules/string_decoder/lib/string_decoder.js\").StringDecoder);\n var decoder = new StringDecoder(enc);\n this._readableState.decoder = decoder;\n // If setEncoding(null), decoder.encoding equals utf8\n this._readableState.encoding = this._readableState.decoder.encoding;\n\n // Iterate over current buffer to convert already stored Buffers:\n var p = this._readableState.buffer.head;\n var content = '';\n while (p !== null) {\n content += decoder.write(p.data);\n p = p.next;\n }\n this._readableState.buffer.clear();\n if (content !== '') this._readableState.buffer.push(content);\n this._readableState.length = content.length;\n return this;\n};\n\n// Don't raise the hwm > 1GB\nvar MAX_HWM = 0x40000000;\nfunction computeNewHighWaterMark(n) {\n if (n >= MAX_HWM) {\n // TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE.\n n = MAX_HWM;\n } else {\n // Get the next highest power of 2 to prevent increasing hwm excessively in\n // tiny amounts\n n--;\n n |= n >>> 1;\n n |= n >>> 2;\n n |= n >>> 4;\n n |= n >>> 8;\n n |= n >>> 16;\n n++;\n }\n return n;\n}\n\n// This function is designed to be inlinable, so please take care when making\n// changes to the function body.\nfunction howMuchToRead(n, state) {\n if (n <= 0 || state.length === 0 && state.ended) return 0;\n if (state.objectMode) return 1;\n if (n !== n) {\n // Only flow one buffer at a time\n if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length;\n }\n // If we're asking for more than the current hwm, then raise the hwm.\n if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n);\n if (n <= state.length) return n;\n // Don't have enough\n if (!state.ended) {\n state.needReadable = true;\n return 0;\n }\n return state.length;\n}\n\n// you can override either this method, or the async _read(n) below.\nReadable.prototype.read = function (n) {\n debug('read', n);\n n = parseInt(n, 10);\n var state = this._readableState;\n var nOrig = n;\n if (n !== 0) state.emittedReadable = false;\n\n // if we're doing read(0) to trigger a readable event, but we\n // already have a bunch of data in the buffer, then just trigger\n // the 'readable' event and move on.\n if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? 
state.length >= state.highWaterMark : state.length > 0) || state.ended)) {\n debug('read: emitReadable', state.length, state.ended);\n if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this);\n return null;\n }\n n = howMuchToRead(n, state);\n\n // if we've ended, and we're now clear, then finish it up.\n if (n === 0 && state.ended) {\n if (state.length === 0) endReadable(this);\n return null;\n }\n\n // All the actual chunk generation logic needs to be\n // *below* the call to _read. The reason is that in certain\n // synthetic stream cases, such as passthrough streams, _read\n // may be a completely synchronous operation which may change\n // the state of the read buffer, providing enough data when\n // before there was *not* enough.\n //\n // So, the steps are:\n // 1. Figure out what the state of things will be after we do\n // a read from the buffer.\n //\n // 2. If that resulting state will trigger a _read, then call _read.\n // Note that this may be asynchronous, or synchronous. Yes, it is\n // deeply ugly to write APIs this way, but that still doesn't mean\n // that the Readable class should behave improperly, as streams are\n // designed to be sync/async agnostic.\n // Take note if the _read call is sync or async (ie, if the read call\n // has returned yet), so that we know whether or not it's safe to emit\n // 'readable' etc.\n //\n // 3. Actually pull the requested chunks out of the buffer and return.\n\n // if we need a readable event, then we need to do some reading.\n var doRead = state.needReadable;\n debug('need readable', doRead);\n\n // if we currently have less than the highWaterMark, then also read some\n if (state.length === 0 || state.length - n < state.highWaterMark) {\n doRead = true;\n debug('length less than watermark', doRead);\n }\n\n // however, if we've ended, then there's no point, and if we're already\n // reading, then it's unnecessary.\n if (state.ended || state.reading) {\n doRead = false;\n debug('reading or ended', doRead);\n } else if (doRead) {\n debug('do read');\n state.reading = true;\n state.sync = true;\n // if the length is currently zero, then we *need* a readable event.\n if (state.length === 0) state.needReadable = true;\n // call internal read method\n this._read(state.highWaterMark);\n state.sync = false;\n // If _read pushed data synchronously, then `reading` will be false,\n // and we need to re-evaluate how much data we can return to the user.\n if (!state.reading) n = howMuchToRead(nOrig, state);\n }\n var ret;\n if (n > 0) ret = fromList(n, state);else ret = null;\n if (ret === null) {\n state.needReadable = state.length <= state.highWaterMark;\n n = 0;\n } else {\n state.length -= n;\n state.awaitDrain = 0;\n }\n if (state.length === 0) {\n // If we have nothing in the buffer, then we want to know\n // as soon as we *do* get something into the buffer.\n if (!state.ended) state.needReadable = true;\n\n // If we tried to read() past the EOF, then emit end on the next tick.\n if (nOrig !== n && state.ended) endReadable(this);\n }\n if (ret !== null) this.emit('data', ret);\n return ret;\n};\nfunction onEofChunk(stream, state) {\n debug('onEofChunk');\n if (state.ended) return;\n if (state.decoder) {\n var chunk = state.decoder.end();\n if (chunk && chunk.length) {\n state.buffer.push(chunk);\n state.length += state.objectMode ? 
1 : chunk.length;\n }\n }\n state.ended = true;\n if (state.sync) {\n // if we are sync, wait until next tick to emit the data.\n // Otherwise we risk emitting data in the flow()\n // the readable code triggers during a read() call\n emitReadable(stream);\n } else {\n // emit 'readable' now to make sure it gets picked up.\n state.needReadable = false;\n if (!state.emittedReadable) {\n state.emittedReadable = true;\n emitReadable_(stream);\n }\n }\n}\n\n// Don't emit readable right away in sync mode, because this can trigger\n// another read() call => stack overflow. This way, it might trigger\n// a nextTick recursion warning, but that's not so bad.\nfunction emitReadable(stream) {\n var state = stream._readableState;\n debug('emitReadable', state.needReadable, state.emittedReadable);\n state.needReadable = false;\n if (!state.emittedReadable) {\n debug('emitReadable', state.flowing);\n state.emittedReadable = true;\n process.nextTick(emitReadable_, stream);\n }\n}\nfunction emitReadable_(stream) {\n var state = stream._readableState;\n debug('emitReadable_', state.destroyed, state.length, state.ended);\n if (!state.destroyed && (state.length || state.ended)) {\n stream.emit('readable');\n state.emittedReadable = false;\n }\n\n // The stream needs another readable event if\n // 1. It is not flowing, as the flow mechanism will take\n // care of it.\n // 2. It is not ended.\n // 3. It is below the highWaterMark, so we can schedule\n // another readable later.\n state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark;\n flow(stream);\n}\n\n// at this point, the user has presumably seen the 'readable' event,\n// and called read() to consume some data. that may have triggered\n// in turn another _read(n) call, in which case reading = true if\n// it's in progress.\n// However, if we're not ended, or reading, and the length < hwm,\n// then go ahead and try to read some more preemptively.\nfunction maybeReadMore(stream, state) {\n if (!state.readingMore) {\n state.readingMore = true;\n process.nextTick(maybeReadMore_, stream, state);\n }\n}\nfunction maybeReadMore_(stream, state) {\n // Attempt to read more data if we should.\n //\n // The conditions for reading more data are (one of):\n // - Not enough data buffered (state.length < state.highWaterMark). The loop\n // is responsible for filling the buffer with enough data if such data\n // is available. If highWaterMark is 0 and we are not in the flowing mode\n // we should _not_ attempt to buffer any extra data. We'll get more data\n // when the stream consumer calls read() instead.\n // - No data in the buffer, and the stream is in flowing mode. In this mode\n // the loop below is responsible for ensuring read() is called. Failing to\n // call read here would abort the flow and there's no other mechanism for\n // continuing the flow if the stream consumer has just subscribed to the\n // 'data' event.\n //\n // In addition to the above conditions to keep reading data, the following\n // conditions prevent the data from being read:\n // - The stream has ended (state.ended).\n // - There is already a pending 'read' operation (state.reading). This is a\n // case where the the stream has called the implementation defined _read()\n // method, but they are processing the call asynchronously and have _not_\n // called push() with new data. In this case we skip performing more\n // read()s. 
The execution ends in this method again after the _read() ends\n // up calling push() with more data.\n while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) {\n var len = state.length;\n debug('maybeReadMore read 0');\n stream.read(0);\n if (len === state.length)\n // didn't get any data, stop spinning.\n break;\n }\n state.readingMore = false;\n}\n\n// abstract method. to be overridden in specific implementation classes.\n// call cb(er, data) where data is <= n in length.\n// for virtual (non-string, non-buffer) streams, \"length\" is somewhat\n// arbitrary, and perhaps not very meaningful.\nReadable.prototype._read = function (n) {\n errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()'));\n};\nReadable.prototype.pipe = function (dest, pipeOpts) {\n var src = this;\n var state = this._readableState;\n switch (state.pipesCount) {\n case 0:\n state.pipes = dest;\n break;\n case 1:\n state.pipes = [state.pipes, dest];\n break;\n default:\n state.pipes.push(dest);\n break;\n }\n state.pipesCount += 1;\n debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);\n var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr;\n var endFn = doEnd ? onend : unpipe;\n if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn);\n dest.on('unpipe', onunpipe);\n function onunpipe(readable, unpipeInfo) {\n debug('onunpipe');\n if (readable === src) {\n if (unpipeInfo && unpipeInfo.hasUnpiped === false) {\n unpipeInfo.hasUnpiped = true;\n cleanup();\n }\n }\n }\n function onend() {\n debug('onend');\n dest.end();\n }\n\n // when the dest drains, it reduces the awaitDrain counter\n // on the source. This would be more elegant with a .once()\n // handler in flow(), but adding and removing repeatedly is\n // too slow.\n var ondrain = pipeOnDrain(src);\n dest.on('drain', ondrain);\n var cleanedUp = false;\n function cleanup() {\n debug('cleanup');\n // cleanup event handlers once the pipe is broken\n dest.removeListener('close', onclose);\n dest.removeListener('finish', onfinish);\n dest.removeListener('drain', ondrain);\n dest.removeListener('error', onerror);\n dest.removeListener('unpipe', onunpipe);\n src.removeListener('end', onend);\n src.removeListener('end', unpipe);\n src.removeListener('data', ondata);\n cleanedUp = true;\n\n // if the reader is waiting for a drain event from this\n // specific writer, then it would cause it to never start\n // flowing again.\n // So, if this is awaiting a drain, then we just call it now.\n // If we don't know, then assume that we are waiting for one.\n if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain();\n }\n src.on('data', ondata);\n function ondata(chunk) {\n debug('ondata');\n var ret = dest.write(chunk);\n debug('dest.write', ret);\n if (ret === false) {\n // If the user unpiped during `dest.write()`, it is possible\n // to get stuck in a permanently paused state if that write\n // also returned false.\n // => Check whether `dest` is still a piping destination.\n if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) {\n debug('false write response, pause', state.awaitDrain);\n state.awaitDrain++;\n }\n src.pause();\n }\n }\n\n // if the dest has an error, then stop piping into it.\n // however, don't suppress the throwing behavior for this.\n function onerror(er) {\n debug('onerror', er);\n unpipe();\n 
dest.removeListener('error', onerror);\n if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er);\n }\n\n // Make sure our error handler is attached before userland ones.\n prependListener(dest, 'error', onerror);\n\n // Both close and finish should trigger unpipe, but only once.\n function onclose() {\n dest.removeListener('finish', onfinish);\n unpipe();\n }\n dest.once('close', onclose);\n function onfinish() {\n debug('onfinish');\n dest.removeListener('close', onclose);\n unpipe();\n }\n dest.once('finish', onfinish);\n function unpipe() {\n debug('unpipe');\n src.unpipe(dest);\n }\n\n // tell the dest that it's being piped to\n dest.emit('pipe', src);\n\n // start the flow if it hasn't been started already.\n if (!state.flowing) {\n debug('pipe resume');\n src.resume();\n }\n return dest;\n};\nfunction pipeOnDrain(src) {\n return function pipeOnDrainFunctionResult() {\n var state = src._readableState;\n debug('pipeOnDrain', state.awaitDrain);\n if (state.awaitDrain) state.awaitDrain--;\n if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) {\n state.flowing = true;\n flow(src);\n }\n };\n}\nReadable.prototype.unpipe = function (dest) {\n var state = this._readableState;\n var unpipeInfo = {\n hasUnpiped: false\n };\n\n // if we're not piping anywhere, then do nothing.\n if (state.pipesCount === 0) return this;\n\n // just one destination. most common case.\n if (state.pipesCount === 1) {\n // passed in one, but it's not the right one.\n if (dest && dest !== state.pipes) return this;\n if (!dest) dest = state.pipes;\n\n // got a match.\n state.pipes = null;\n state.pipesCount = 0;\n state.flowing = false;\n if (dest) dest.emit('unpipe', this, unpipeInfo);\n return this;\n }\n\n // slow case. multiple pipe destinations.\n\n if (!dest) {\n // remove all.\n var dests = state.pipes;\n var len = state.pipesCount;\n state.pipes = null;\n state.pipesCount = 0;\n state.flowing = false;\n for (var i = 0; i < len; i++) dests[i].emit('unpipe', this, {\n hasUnpiped: false\n });\n return this;\n }\n\n // try to find the right one.\n var index = indexOf(state.pipes, dest);\n if (index === -1) return this;\n state.pipes.splice(index, 1);\n state.pipesCount -= 1;\n if (state.pipesCount === 1) state.pipes = state.pipes[0];\n dest.emit('unpipe', this, unpipeInfo);\n return this;\n};\n\n// set up data events if they are asked for\n// Ensure readable listeners eventually get something\nReadable.prototype.on = function (ev, fn) {\n var res = Stream.prototype.on.call(this, ev, fn);\n var state = this._readableState;\n if (ev === 'data') {\n // update readableListening so that resume() may be a no-op\n // a few lines down. 
This is needed to support once('readable').\n state.readableListening = this.listenerCount('readable') > 0;\n\n // Try start flowing on next tick if stream isn't explicitly paused\n if (state.flowing !== false) this.resume();\n } else if (ev === 'readable') {\n if (!state.endEmitted && !state.readableListening) {\n state.readableListening = state.needReadable = true;\n state.flowing = false;\n state.emittedReadable = false;\n debug('on readable', state.length, state.reading);\n if (state.length) {\n emitReadable(this);\n } else if (!state.reading) {\n process.nextTick(nReadingNextTick, this);\n }\n }\n }\n return res;\n};\nReadable.prototype.addListener = Readable.prototype.on;\nReadable.prototype.removeListener = function (ev, fn) {\n var res = Stream.prototype.removeListener.call(this, ev, fn);\n if (ev === 'readable') {\n // We need to check if there is someone still listening to\n // readable and reset the state. However this needs to happen\n // after readable has been emitted but before I/O (nextTick) to\n // support once('readable', fn) cycles. This means that calling\n // resume within the same tick will have no\n // effect.\n process.nextTick(updateReadableListening, this);\n }\n return res;\n};\nReadable.prototype.removeAllListeners = function (ev) {\n var res = Stream.prototype.removeAllListeners.apply(this, arguments);\n if (ev === 'readable' || ev === undefined) {\n // We need to check if there is someone still listening to\n // readable and reset the state. However this needs to happen\n // after readable has been emitted but before I/O (nextTick) to\n // support once('readable', fn) cycles. This means that calling\n // resume within the same tick will have no\n // effect.\n process.nextTick(updateReadableListening, this);\n }\n return res;\n};\nfunction updateReadableListening(self) {\n var state = self._readableState;\n state.readableListening = self.listenerCount('readable') > 0;\n if (state.resumeScheduled && !state.paused) {\n // flowing needs to be set to true now, otherwise\n // the upcoming resume will not flow.\n state.flowing = true;\n\n // crude way to check if we should resume\n } else if (self.listenerCount('data') > 0) {\n self.resume();\n }\n}\nfunction nReadingNextTick(self) {\n debug('readable nexttick read 0');\n self.read(0);\n}\n\n// pause() and resume() are remnants of the legacy readable stream API\n// If the user uses them, then switch into old mode.\nReadable.prototype.resume = function () {\n var state = this._readableState;\n if (!state.flowing) {\n debug('resume');\n // we flow only if there is no one listening\n // for readable, but we still have to call\n // resume()\n state.flowing = !state.readableListening;\n resume(this, state);\n }\n state.paused = false;\n return this;\n};\nfunction resume(stream, state) {\n if (!state.resumeScheduled) {\n state.resumeScheduled = true;\n process.nextTick(resume_, stream, state);\n }\n}\nfunction resume_(stream, state) {\n debug('resume', state.reading);\n if (!state.reading) {\n stream.read(0);\n }\n state.resumeScheduled = false;\n stream.emit('resume');\n flow(stream);\n if (state.flowing && !state.reading) stream.read(0);\n}\nReadable.prototype.pause = function () {\n debug('call pause flowing=%j', this._readableState.flowing);\n if (this._readableState.flowing !== false) {\n debug('pause');\n this._readableState.flowing = false;\n this.emit('pause');\n }\n this._readableState.paused = true;\n return this;\n};\nfunction flow(stream) {\n var state = stream._readableState;\n debug('flow', state.flowing);\n 
while (state.flowing && stream.read() !== null);\n}\n\n// wrap an old-style stream as the async data source.\n// This is *not* part of the readable stream interface.\n// It is an ugly unfortunate mess of history.\nReadable.prototype.wrap = function (stream) {\n var _this = this;\n var state = this._readableState;\n var paused = false;\n stream.on('end', function () {\n debug('wrapped end');\n if (state.decoder && !state.ended) {\n var chunk = state.decoder.end();\n if (chunk && chunk.length) _this.push(chunk);\n }\n _this.push(null);\n });\n stream.on('data', function (chunk) {\n debug('wrapped data');\n if (state.decoder) chunk = state.decoder.write(chunk);\n\n // don't skip over falsy values in objectMode\n if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return;\n var ret = _this.push(chunk);\n if (!ret) {\n paused = true;\n stream.pause();\n }\n });\n\n // proxy all the other methods.\n // important when wrapping filters and duplexes.\n for (var i in stream) {\n if (this[i] === undefined && typeof stream[i] === 'function') {\n this[i] = function methodWrap(method) {\n return function methodWrapReturnFunction() {\n return stream[method].apply(stream, arguments);\n };\n }(i);\n }\n }\n\n // proxy certain important events.\n for (var n = 0; n < kProxyEvents.length; n++) {\n stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n]));\n }\n\n // when we try to consume some more bytes, simply unpause the\n // underlying stream.\n this._read = function (n) {\n debug('wrapped _read', n);\n if (paused) {\n paused = false;\n stream.resume();\n }\n };\n return this;\n};\nif (typeof Symbol === 'function') {\n Readable.prototype[Symbol.asyncIterator] = function () {\n if (createReadableStreamAsyncIterator === undefined) {\n createReadableStreamAsyncIterator = __webpack_require__(/*! 
./internal/streams/async_iterator */ \"./node_modules/readable-stream/lib/internal/streams/async_iterator.js\");\n }\n return createReadableStreamAsyncIterator(this);\n };\n}\nObject.defineProperty(Readable.prototype, 'readableHighWaterMark', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._readableState.highWaterMark;\n }\n});\nObject.defineProperty(Readable.prototype, 'readableBuffer', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._readableState && this._readableState.buffer;\n }\n});\nObject.defineProperty(Readable.prototype, 'readableFlowing', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._readableState.flowing;\n },\n set: function set(state) {\n if (this._readableState) {\n this._readableState.flowing = state;\n }\n }\n});\n\n// exposed for testing purposes only.\nReadable._fromList = fromList;\nObject.defineProperty(Readable.prototype, 'readableLength', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._readableState.length;\n }\n});\n\n// Pluck off n bytes from an array of buffers.\n// Length is the combined lengths of all the buffers in the list.\n// This function is designed to be inlinable, so please take care when making\n// changes to the function body.\nfunction fromList(n, state) {\n // nothing buffered\n if (state.length === 0) return null;\n var ret;\n if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) {\n // read it all, truncate the list\n if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length);\n state.buffer.clear();\n } else {\n // read part of list\n ret = state.buffer.consume(n, state.decoder);\n }\n return ret;\n}\nfunction endReadable(stream) {\n var state = stream._readableState;\n debug('endReadable', state.endEmitted);\n if (!state.endEmitted) {\n state.ended = true;\n process.nextTick(endReadableNT, state, stream);\n }\n}\nfunction endReadableNT(state, stream) {\n debug('endReadableNT', state.endEmitted, state.length);\n\n // Check that we didn't get one last unshift.\n if (!state.endEmitted && state.length === 0) {\n state.endEmitted = true;\n stream.readable = false;\n stream.emit('end');\n if (state.autoDestroy) {\n // In case of duplex streams we need a way to detect\n // if the writable side is ready for autoDestroy as well\n var wState = stream._writableState;\n if (!wState || wState.autoDestroy && wState.finished) {\n stream.destroy();\n }\n }\n }\n}\nif (typeof Symbol === 'function') {\n Readable.from = function (iterable, opts) {\n if (from === undefined) {\n from = __webpack_require__(/*! 
./internal/streams/from */ \"./node_modules/readable-stream/lib/internal/streams/from-browser.js\");\n }\n return from(Readable, iterable, opts);\n };\n}\nfunction indexOf(xs, x) {\n for (var i = 0, l = xs.length; i < l; i++) {\n if (xs[i] === x) return i;\n }\n return -1;\n}\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/_stream_readable.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/_stream_transform.js": +/*!***************************************************************!*\ + !*** ./node_modules/readable-stream/lib/_stream_transform.js ***! + \***************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n// a transform stream is a readable/writable stream where you do\n// something with the data. Sometimes it's called a \"filter\",\n// but that's not a great name for it, since that implies a thing where\n// some bits pass through, and others are simply ignored. (That would\n// be a valid example of a transform, of course.)\n//\n// While the output is causally related to the input, it's not a\n// necessarily symmetric or synchronous transformation. For example,\n// a zlib stream might take multiple plain-text writes(), and then\n// emit a single compressed chunk some time in the future.\n//\n// Here's how this works:\n//\n// The Transform stream has all the aspects of the readable and writable\n// stream classes. When you write(chunk), that calls _write(chunk,cb)\n// internally, and returns false if there's a lot of pending writes\n// buffered up. When you call read(), that calls _read(n) until\n// there's enough pending readable data buffered up.\n//\n// In a transform stream, the written data is placed in a buffer. When\n// _read(n) is called, it transforms the queued up data, calling the\n// buffered _write cb's as it consumes chunks. If consuming a single\n// written chunk would result in multiple output chunks, then the first\n// outputted bit calls the readcb, and subsequent chunks just go into\n// the read buffer, and will cause it to emit 'readable' if necessary.\n//\n// This way, back-pressure is actually determined by the reading side,\n// since _read has to be called to start processing a new chunk. However,\n// a pathological inflate type of transform can cause excessive buffering\n// here. 
For example, imagine a stream where every byte of input is\n// interpreted as an integer from 0-255, and then results in that many\n// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in\n// 1kb of data being output. In this case, you could write a very small\n// amount of input, and end up with a very large amount of output. In\n// such a pathological inflating mechanism, there'd be no way to tell\n// the system to stop doing the transform. A single 4MB write could\n// cause the system to run out of memory.\n//\n// However, even in such a pathological case, only a single written chunk\n// would be consumed, and then the rest would wait (un-transformed) until\n// the results of the previous transformed chunk were consumed.\n\n\n\nmodule.exports = Transform;\nvar _require$codes = (__webpack_require__(/*! ../errors */ \"./node_modules/readable-stream/errors-browser.js\").codes),\n ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,\n ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,\n ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING,\n ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0;\nvar Duplex = __webpack_require__(/*! ./_stream_duplex */ \"./node_modules/readable-stream/lib/_stream_duplex.js\");\n__webpack_require__(/*! inherits */ \"./node_modules/inherits/inherits_browser.js\")(Transform, Duplex);\nfunction afterTransform(er, data) {\n var ts = this._transformState;\n ts.transforming = false;\n var cb = ts.writecb;\n if (cb === null) {\n return this.emit('error', new ERR_MULTIPLE_CALLBACK());\n }\n ts.writechunk = null;\n ts.writecb = null;\n if (data != null)\n // single equals check for both `null` and `undefined`\n this.push(data);\n cb(er);\n var rs = this._readableState;\n rs.reading = false;\n if (rs.needReadable || rs.length < rs.highWaterMark) {\n this._read(rs.highWaterMark);\n }\n}\nfunction Transform(options) {\n if (!(this instanceof Transform)) return new Transform(options);\n Duplex.call(this, options);\n this._transformState = {\n afterTransform: afterTransform.bind(this),\n needTransform: false,\n transforming: false,\n writecb: null,\n writechunk: null,\n writeencoding: null\n };\n\n // start out asking for a readable event once data is transformed.\n this._readableState.needReadable = true;\n\n // we have implemented the _read method, and done the other things\n // that Readable wants before the first _read call, so unset the\n // sync guard flag.\n this._readableState.sync = false;\n if (options) {\n if (typeof options.transform === 'function') this._transform = options.transform;\n if (typeof options.flush === 'function') this._flush = options.flush;\n }\n\n // When the writable side finishes, then flush out anything remaining.\n this.on('prefinish', prefinish);\n}\nfunction prefinish() {\n var _this = this;\n if (typeof this._flush === 'function' && !this._readableState.destroyed) {\n this._flush(function (er, data) {\n done(_this, er, data);\n });\n } else {\n done(this, null, null);\n }\n}\nTransform.prototype.push = function (chunk, encoding) {\n this._transformState.needTransform = false;\n return Duplex.prototype.push.call(this, chunk, encoding);\n};\n\n// This is the part where you do stuff!\n// override this function in implementation classes.\n// 'chunk' is an input chunk.\n//\n// Call `push(newChunk)` to pass along transformed output\n// to the readable side. 
You may call 'push' zero or more times.\n//\n// Call `cb(err)` when you are done with this chunk. If you pass\n// an error, then that'll put the hurt on the whole operation. If you\n// never call cb(), then you'll never get another chunk.\nTransform.prototype._transform = function (chunk, encoding, cb) {\n cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()'));\n};\nTransform.prototype._write = function (chunk, encoding, cb) {\n var ts = this._transformState;\n ts.writecb = cb;\n ts.writechunk = chunk;\n ts.writeencoding = encoding;\n if (!ts.transforming) {\n var rs = this._readableState;\n if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark);\n }\n};\n\n// Doesn't matter what the args are here.\n// _transform does all the work.\n// That we got here means that the readable side wants more data.\nTransform.prototype._read = function (n) {\n var ts = this._transformState;\n if (ts.writechunk !== null && !ts.transforming) {\n ts.transforming = true;\n this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);\n } else {\n // mark that we need a transform, so that any data that comes in\n // will get processed, now that we've asked for it.\n ts.needTransform = true;\n }\n};\nTransform.prototype._destroy = function (err, cb) {\n Duplex.prototype._destroy.call(this, err, function (err2) {\n cb(err2);\n });\n};\nfunction done(stream, er, data) {\n if (er) return stream.emit('error', er);\n if (data != null)\n // single equals check for both `null` and `undefined`\n stream.push(data);\n\n // TODO(BridgeAR): Write a test for these two error cases\n // if there's nothing in the write buffer, then that means\n // that nothing more will ever be provided\n if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0();\n if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING();\n return stream.push(null);\n}\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/_stream_transform.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/_stream_writable.js": +/*!**************************************************************!*\ + !*** ./node_modules/readable-stream/lib/_stream_writable.js ***! + \**************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("/* provided dependency */ var process = __webpack_require__(/*! process/browser */ \"./node_modules/process/browser.js\");\n// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n// A bit simpler than readable streams.\n// Implement an async ._write(chunk, encoding, cb), and it'll handle all\n// the drain event emission and buffering.\n\n\n\nmodule.exports = Writable;\n\n/* */\nfunction WriteReq(chunk, encoding, cb) {\n this.chunk = chunk;\n this.encoding = encoding;\n this.callback = cb;\n this.next = null;\n}\n\n// It seems a linked list but it is not\n// there will be only 2 of these for each stream\nfunction CorkedRequest(state) {\n var _this = this;\n this.next = null;\n this.entry = null;\n this.finish = function () {\n onCorkedFinish(_this, state);\n };\n}\n/* */\n\n/**/\nvar Duplex;\n/**/\n\nWritable.WritableState = WritableState;\n\n/**/\nvar internalUtil = {\n deprecate: __webpack_require__(/*! util-deprecate */ \"./node_modules/util-deprecate/browser.js\")\n};\n/**/\n\n/**/\nvar Stream = __webpack_require__(/*! ./internal/streams/stream */ \"./node_modules/readable-stream/lib/internal/streams/stream-browser.js\");\n/**/\n\nvar Buffer = (__webpack_require__(/*! buffer */ \"./node_modules/buffer/index.js\").Buffer);\nvar OurUint8Array = (typeof __webpack_require__.g !== 'undefined' ? __webpack_require__.g : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? self : {}).Uint8Array || function () {};\nfunction _uint8ArrayToBuffer(chunk) {\n return Buffer.from(chunk);\n}\nfunction _isUint8Array(obj) {\n return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;\n}\nvar destroyImpl = __webpack_require__(/*! ./internal/streams/destroy */ \"./node_modules/readable-stream/lib/internal/streams/destroy.js\");\nvar _require = __webpack_require__(/*! ./internal/streams/state */ \"./node_modules/readable-stream/lib/internal/streams/state.js\"),\n getHighWaterMark = _require.getHighWaterMark;\nvar _require$codes = (__webpack_require__(/*! ../errors */ \"./node_modules/readable-stream/errors-browser.js\").codes),\n ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,\n ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,\n ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,\n ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE,\n ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED,\n ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES,\n ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END,\n ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING;\nvar errorOrDestroy = destroyImpl.errorOrDestroy;\n__webpack_require__(/*! inherits */ \"./node_modules/inherits/inherits_browser.js\")(Writable, Stream);\nfunction nop() {}\nfunction WritableState(options, stream, isDuplex) {\n Duplex = Duplex || __webpack_require__(/*! ./_stream_duplex */ \"./node_modules/readable-stream/lib/_stream_duplex.js\");\n options = options || {};\n\n // Duplex streams are both readable and writable, but share\n // the same options object.\n // However, some cases require setting options to different\n // values for the readable and the writable sides of the duplex stream,\n // e.g. options.readableObjectMode vs. 
options.writableObjectMode, etc.\n if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex;\n\n // object stream flag to indicate whether or not this stream\n // contains buffers or objects.\n this.objectMode = !!options.objectMode;\n if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode;\n\n // the point at which write() starts returning false\n // Note: 0 is a valid value, means that we always return false if\n // the entire buffer is not flushed immediately on write()\n this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex);\n\n // if _final has been called\n this.finalCalled = false;\n\n // drain event flag.\n this.needDrain = false;\n // at the start of calling end()\n this.ending = false;\n // when end() has been called, and returned\n this.ended = false;\n // when 'finish' is emitted\n this.finished = false;\n\n // has it been destroyed\n this.destroyed = false;\n\n // should we decode strings into buffers before passing to _write?\n // this is here so that some node-core streams can optimize string\n // handling at a lower level.\n var noDecode = options.decodeStrings === false;\n this.decodeStrings = !noDecode;\n\n // Crypto is kind of old and crusty. Historically, its default string\n // encoding is 'binary' so we have to make this configurable.\n // Everything else in the universe uses 'utf8', though.\n this.defaultEncoding = options.defaultEncoding || 'utf8';\n\n // not an actual buffer we keep track of, but a measurement\n // of how much we're waiting to get pushed to some underlying\n // socket or file.\n this.length = 0;\n\n // a flag to see when we're in the middle of a write.\n this.writing = false;\n\n // when true all writes will be buffered until .uncork() call\n this.corked = 0;\n\n // a flag to be able to tell if the onwrite cb is called immediately,\n // or on a later tick. We set this to true at first, because any\n // actions that shouldn't happen until \"later\" should generally also\n // not happen before the first write call.\n this.sync = true;\n\n // a flag to know if we're processing previously buffered items, which\n // may call the _write() callback in the same tick, so that we don't\n // end up in an overlapped onwrite situation.\n this.bufferProcessing = false;\n\n // the callback that's passed to _write(chunk,cb)\n this.onwrite = function (er) {\n onwrite(stream, er);\n };\n\n // the callback that the user supplies to write(chunk,encoding,cb)\n this.writecb = null;\n\n // the amount that is being written when _write is called.\n this.writelen = 0;\n this.bufferedRequest = null;\n this.lastBufferedRequest = null;\n\n // number of pending user-supplied write callbacks\n // this must be 0 before 'finish' can be emitted\n this.pendingcb = 0;\n\n // emit prefinish if the only thing we're waiting for is _write cbs\n // This is relevant for synchronous Transform streams\n this.prefinished = false;\n\n // True if the error was already emitted and should not be thrown again\n this.errorEmitted = false;\n\n // Should close be emitted on destroy. 
Defaults to true.\n this.emitClose = options.emitClose !== false;\n\n // Should .destroy() be called after 'finish' (and potentially 'end')\n this.autoDestroy = !!options.autoDestroy;\n\n // count buffered requests\n this.bufferedRequestCount = 0;\n\n // allocate the first CorkedRequest, there is always\n // one allocated and free to use, and we maintain at most two\n this.corkedRequestsFree = new CorkedRequest(this);\n}\nWritableState.prototype.getBuffer = function getBuffer() {\n var current = this.bufferedRequest;\n var out = [];\n while (current) {\n out.push(current);\n current = current.next;\n }\n return out;\n};\n(function () {\n try {\n Object.defineProperty(WritableState.prototype, 'buffer', {\n get: internalUtil.deprecate(function writableStateBufferGetter() {\n return this.getBuffer();\n }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003')\n });\n } catch (_) {}\n})();\n\n// Test _writableState for inheritance to account for Duplex streams,\n// whose prototype chain only points to Readable.\nvar realHasInstance;\nif (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') {\n realHasInstance = Function.prototype[Symbol.hasInstance];\n Object.defineProperty(Writable, Symbol.hasInstance, {\n value: function value(object) {\n if (realHasInstance.call(this, object)) return true;\n if (this !== Writable) return false;\n return object && object._writableState instanceof WritableState;\n }\n });\n} else {\n realHasInstance = function realHasInstance(object) {\n return object instanceof this;\n };\n}\nfunction Writable(options) {\n Duplex = Duplex || __webpack_require__(/*! ./_stream_duplex */ \"./node_modules/readable-stream/lib/_stream_duplex.js\");\n\n // Writable ctor is applied to Duplexes, too.\n // `realHasInstance` is necessary because using plain `instanceof`\n // would return false, as no `_writableState` property is attached.\n\n // Trying to use the custom `instanceof` for Writable here will also break the\n // Node.js LazyTransform implementation, which has a non-trivial getter for\n // `_writableState` that would lead to infinite recursion.\n\n // Checking for a Stream.Duplex instance is faster here instead of inside\n // the WritableState constructor, at least with V8 6.5\n var isDuplex = this instanceof Duplex;\n if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options);\n this._writableState = new WritableState(options, this, isDuplex);\n\n // legacy.\n this.writable = true;\n if (options) {\n if (typeof options.write === 'function') this._write = options.write;\n if (typeof options.writev === 'function') this._writev = options.writev;\n if (typeof options.destroy === 'function') this._destroy = options.destroy;\n if (typeof options.final === 'function') this._final = options.final;\n }\n Stream.call(this);\n}\n\n// Otherwise people can pipe Writable streams, which is just wrong.\nWritable.prototype.pipe = function () {\n errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE());\n};\nfunction writeAfterEnd(stream, cb) {\n var er = new ERR_STREAM_WRITE_AFTER_END();\n // TODO: defer error events consistently everywhere, not just the cb\n errorOrDestroy(stream, er);\n process.nextTick(cb, er);\n}\n\n// Checks that a user-supplied chunk is valid, especially for the particular\n// mode the stream is in. 
Currently this means that `null` is never accepted\n// and undefined/non-string values are only allowed in object mode.\nfunction validChunk(stream, state, chunk, cb) {\n var er;\n if (chunk === null) {\n er = new ERR_STREAM_NULL_VALUES();\n } else if (typeof chunk !== 'string' && !state.objectMode) {\n er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk);\n }\n if (er) {\n errorOrDestroy(stream, er);\n process.nextTick(cb, er);\n return false;\n }\n return true;\n}\nWritable.prototype.write = function (chunk, encoding, cb) {\n var state = this._writableState;\n var ret = false;\n var isBuf = !state.objectMode && _isUint8Array(chunk);\n if (isBuf && !Buffer.isBuffer(chunk)) {\n chunk = _uint8ArrayToBuffer(chunk);\n }\n if (typeof encoding === 'function') {\n cb = encoding;\n encoding = null;\n }\n if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding;\n if (typeof cb !== 'function') cb = nop;\n if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) {\n state.pendingcb++;\n ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);\n }\n return ret;\n};\nWritable.prototype.cork = function () {\n this._writableState.corked++;\n};\nWritable.prototype.uncork = function () {\n var state = this._writableState;\n if (state.corked) {\n state.corked--;\n if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state);\n }\n};\nWritable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) {\n // node::ParseEncoding() requires lower case.\n if (typeof encoding === 'string') encoding = encoding.toLowerCase();\n if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding);\n this._writableState.defaultEncoding = encoding;\n return this;\n};\nObject.defineProperty(Writable.prototype, 'writableBuffer', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._writableState && this._writableState.getBuffer();\n }\n});\nfunction decodeChunk(state, chunk, encoding) {\n if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') {\n chunk = Buffer.from(chunk, encoding);\n }\n return chunk;\n}\nObject.defineProperty(Writable.prototype, 'writableHighWaterMark', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._writableState.highWaterMark;\n }\n});\n\n// if we're already writing something, then just put this\n// in the queue, and wait our turn. Otherwise, call _write\n// If we return false, then we need a drain event, so set that flag.\nfunction writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) {\n if (!isBuf) {\n var newChunk = decodeChunk(state, chunk, encoding);\n if (chunk !== newChunk) {\n isBuf = true;\n encoding = 'buffer';\n chunk = newChunk;\n }\n }\n var len = state.objectMode ? 
1 : chunk.length;\n state.length += len;\n var ret = state.length < state.highWaterMark;\n // we must ensure that previous needDrain will not be reset to false.\n if (!ret) state.needDrain = true;\n if (state.writing || state.corked) {\n var last = state.lastBufferedRequest;\n state.lastBufferedRequest = {\n chunk: chunk,\n encoding: encoding,\n isBuf: isBuf,\n callback: cb,\n next: null\n };\n if (last) {\n last.next = state.lastBufferedRequest;\n } else {\n state.bufferedRequest = state.lastBufferedRequest;\n }\n state.bufferedRequestCount += 1;\n } else {\n doWrite(stream, state, false, len, chunk, encoding, cb);\n }\n return ret;\n}\nfunction doWrite(stream, state, writev, len, chunk, encoding, cb) {\n state.writelen = len;\n state.writecb = cb;\n state.writing = true;\n state.sync = true;\n if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite);\n state.sync = false;\n}\nfunction onwriteError(stream, state, sync, er, cb) {\n --state.pendingcb;\n if (sync) {\n // defer the callback if we are being called synchronously\n // to avoid piling up things on the stack\n process.nextTick(cb, er);\n // this can emit finish, and it will always happen\n // after error\n process.nextTick(finishMaybe, stream, state);\n stream._writableState.errorEmitted = true;\n errorOrDestroy(stream, er);\n } else {\n // the caller expect this to happen before if\n // it is async\n cb(er);\n stream._writableState.errorEmitted = true;\n errorOrDestroy(stream, er);\n // this can emit finish, but finish must\n // always follow error\n finishMaybe(stream, state);\n }\n}\nfunction onwriteStateUpdate(state) {\n state.writing = false;\n state.writecb = null;\n state.length -= state.writelen;\n state.writelen = 0;\n}\nfunction onwrite(stream, er) {\n var state = stream._writableState;\n var sync = state.sync;\n var cb = state.writecb;\n if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK();\n onwriteStateUpdate(state);\n if (er) onwriteError(stream, state, sync, er, cb);else {\n // Check if we're actually ready to finish, but don't emit yet\n var finished = needFinish(state) || stream.destroyed;\n if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) {\n clearBuffer(stream, state);\n }\n if (sync) {\n process.nextTick(afterWrite, stream, state, finished, cb);\n } else {\n afterWrite(stream, state, finished, cb);\n }\n }\n}\nfunction afterWrite(stream, state, finished, cb) {\n if (!finished) onwriteDrain(stream, state);\n state.pendingcb--;\n cb();\n finishMaybe(stream, state);\n}\n\n// Must force callback to be called on nextTick, so that we don't\n// emit 'drain' before the write() consumer gets the 'false' return\n// value, and has a chance to attach a 'drain' listener.\nfunction onwriteDrain(stream, state) {\n if (state.length === 0 && state.needDrain) {\n state.needDrain = false;\n stream.emit('drain');\n }\n}\n\n// if there's something in the buffer waiting, then process it\nfunction clearBuffer(stream, state) {\n state.bufferProcessing = true;\n var entry = state.bufferedRequest;\n if (stream._writev && entry && entry.next) {\n // Fast case, write everything using _writev()\n var l = state.bufferedRequestCount;\n var buffer = new Array(l);\n var holder = state.corkedRequestsFree;\n holder.entry = entry;\n var count = 0;\n var allBuffers = true;\n while (entry) {\n buffer[count] = entry;\n if (!entry.isBuf) allBuffers = false;\n entry = entry.next;\n count += 
1;\n }\n buffer.allBuffers = allBuffers;\n doWrite(stream, state, true, state.length, buffer, '', holder.finish);\n\n // doWrite is almost always async, defer these to save a bit of time\n // as the hot path ends with doWrite\n state.pendingcb++;\n state.lastBufferedRequest = null;\n if (holder.next) {\n state.corkedRequestsFree = holder.next;\n holder.next = null;\n } else {\n state.corkedRequestsFree = new CorkedRequest(state);\n }\n state.bufferedRequestCount = 0;\n } else {\n // Slow case, write chunks one-by-one\n while (entry) {\n var chunk = entry.chunk;\n var encoding = entry.encoding;\n var cb = entry.callback;\n var len = state.objectMode ? 1 : chunk.length;\n doWrite(stream, state, false, len, chunk, encoding, cb);\n entry = entry.next;\n state.bufferedRequestCount--;\n // if we didn't call the onwrite immediately, then\n // it means that we need to wait until it does.\n // also, that means that the chunk and cb are currently\n // being processed, so move the buffer counter past them.\n if (state.writing) {\n break;\n }\n }\n if (entry === null) state.lastBufferedRequest = null;\n }\n state.bufferedRequest = entry;\n state.bufferProcessing = false;\n}\nWritable.prototype._write = function (chunk, encoding, cb) {\n cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()'));\n};\nWritable.prototype._writev = null;\nWritable.prototype.end = function (chunk, encoding, cb) {\n var state = this._writableState;\n if (typeof chunk === 'function') {\n cb = chunk;\n chunk = null;\n encoding = null;\n } else if (typeof encoding === 'function') {\n cb = encoding;\n encoding = null;\n }\n if (chunk !== null && chunk !== undefined) this.write(chunk, encoding);\n\n // .end() fully uncorks\n if (state.corked) {\n state.corked = 1;\n this.uncork();\n }\n\n // ignore unnecessary end() calls.\n if (!state.ending) endWritable(this, state, cb);\n return this;\n};\nObject.defineProperty(Writable.prototype, 'writableLength', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n return this._writableState.length;\n }\n});\nfunction needFinish(state) {\n return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing;\n}\nfunction callFinal(stream, state) {\n stream._final(function (err) {\n state.pendingcb--;\n if (err) {\n errorOrDestroy(stream, err);\n }\n state.prefinished = true;\n stream.emit('prefinish');\n finishMaybe(stream, state);\n });\n}\nfunction prefinish(stream, state) {\n if (!state.prefinished && !state.finalCalled) {\n if (typeof stream._final === 'function' && !state.destroyed) {\n state.pendingcb++;\n state.finalCalled = true;\n process.nextTick(callFinal, stream, state);\n } else {\n state.prefinished = true;\n stream.emit('prefinish');\n }\n }\n}\nfunction finishMaybe(stream, state) {\n var need = needFinish(state);\n if (need) {\n prefinish(stream, state);\n if (state.pendingcb === 0) {\n state.finished = true;\n stream.emit('finish');\n if (state.autoDestroy) {\n // In case of duplex streams we need a way to detect\n // if the readable side is ready for autoDestroy as well\n var rState = stream._readableState;\n if (!rState || rState.autoDestroy && rState.endEmitted) {\n stream.destroy();\n }\n }\n }\n }\n return need;\n}\nfunction endWritable(stream, state, cb) {\n state.ending = true;\n finishMaybe(stream, state);\n if (cb) {\n if (state.finished) process.nextTick(cb);else stream.once('finish', cb);\n }\n 
state.ended = true;\n stream.writable = false;\n}\nfunction onCorkedFinish(corkReq, state, err) {\n var entry = corkReq.entry;\n corkReq.entry = null;\n while (entry) {\n var cb = entry.callback;\n state.pendingcb--;\n cb(err);\n entry = entry.next;\n }\n\n // reuse the free corkReq.\n state.corkedRequestsFree.next = corkReq;\n}\nObject.defineProperty(Writable.prototype, 'destroyed', {\n // making it explicit this property is not enumerable\n // because otherwise some prototype manipulation in\n // userland will fail\n enumerable: false,\n get: function get() {\n if (this._writableState === undefined) {\n return false;\n }\n return this._writableState.destroyed;\n },\n set: function set(value) {\n // we ignore the value if the stream\n // has not been initialized yet\n if (!this._writableState) {\n return;\n }\n\n // backward compatibility, the user is explicitly\n // managing destroyed\n this._writableState.destroyed = value;\n }\n});\nWritable.prototype.destroy = destroyImpl.destroy;\nWritable.prototype._undestroy = destroyImpl.undestroy;\nWritable.prototype._destroy = function (err, cb) {\n cb(err);\n};\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/_stream_writable.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/async_iterator.js": +/*!*****************************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/async_iterator.js ***! + \*****************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("/* provided dependency */ var process = __webpack_require__(/*! process/browser */ \"./node_modules/process/browser.js\");\n\n\nvar _Object$setPrototypeO;\nfunction _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }\nfunction _toPropertyKey(arg) { var key = _toPrimitive(arg, \"string\"); return typeof key === \"symbol\" ? key : String(key); }\nfunction _toPrimitive(input, hint) { if (typeof input !== \"object\" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || \"default\"); if (typeof res !== \"object\") return res; throw new TypeError(\"@@toPrimitive must return a primitive value.\"); } return (hint === \"string\" ? String : Number)(input); }\nvar finished = __webpack_require__(/*! 
./end-of-stream */ \"./node_modules/readable-stream/lib/internal/streams/end-of-stream.js\");\nvar kLastResolve = Symbol('lastResolve');\nvar kLastReject = Symbol('lastReject');\nvar kError = Symbol('error');\nvar kEnded = Symbol('ended');\nvar kLastPromise = Symbol('lastPromise');\nvar kHandlePromise = Symbol('handlePromise');\nvar kStream = Symbol('stream');\nfunction createIterResult(value, done) {\n return {\n value: value,\n done: done\n };\n}\nfunction readAndResolve(iter) {\n var resolve = iter[kLastResolve];\n if (resolve !== null) {\n var data = iter[kStream].read();\n // we defer if data is null\n // we can be expecting either 'end' or\n // 'error'\n if (data !== null) {\n iter[kLastPromise] = null;\n iter[kLastResolve] = null;\n iter[kLastReject] = null;\n resolve(createIterResult(data, false));\n }\n }\n}\nfunction onReadable(iter) {\n // we wait for the next tick, because it might\n // emit an error with process.nextTick\n process.nextTick(readAndResolve, iter);\n}\nfunction wrapForNext(lastPromise, iter) {\n return function (resolve, reject) {\n lastPromise.then(function () {\n if (iter[kEnded]) {\n resolve(createIterResult(undefined, true));\n return;\n }\n iter[kHandlePromise](resolve, reject);\n }, reject);\n };\n}\nvar AsyncIteratorPrototype = Object.getPrototypeOf(function () {});\nvar ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = {\n get stream() {\n return this[kStream];\n },\n next: function next() {\n var _this = this;\n // if we have detected an error in the meanwhile\n // reject straight away\n var error = this[kError];\n if (error !== null) {\n return Promise.reject(error);\n }\n if (this[kEnded]) {\n return Promise.resolve(createIterResult(undefined, true));\n }\n if (this[kStream].destroyed) {\n // We need to defer via nextTick because if .destroy(err) is\n // called, the error will be emitted via nextTick, and\n // we cannot guarantee that there is no error lingering around\n // waiting to be emitted.\n return new Promise(function (resolve, reject) {\n process.nextTick(function () {\n if (_this[kError]) {\n reject(_this[kError]);\n } else {\n resolve(createIterResult(undefined, true));\n }\n });\n });\n }\n\n // if we have multiple next() calls\n // we will wait for the previous Promise to finish\n // this logic is optimized to support for await loops,\n // where next() is only called once at a time\n var lastPromise = this[kLastPromise];\n var promise;\n if (lastPromise) {\n promise = new Promise(wrapForNext(lastPromise, this));\n } else {\n // fast path needed to support multiple this.push()\n // without triggering the next() queue\n var data = this[kStream].read();\n if (data !== null) {\n return Promise.resolve(createIterResult(data, false));\n }\n promise = new Promise(this[kHandlePromise]);\n }\n this[kLastPromise] = promise;\n return promise;\n }\n}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () {\n return this;\n}), _defineProperty(_Object$setPrototypeO, \"return\", function _return() {\n var _this2 = this;\n // destroy(err, cb) is a private API\n // we can guarantee we have that here, because we control the\n // Readable class this is attached to\n return new Promise(function (resolve, reject) {\n _this2[kStream].destroy(null, function (err) {\n if (err) {\n reject(err);\n return;\n }\n resolve(createIterResult(undefined, true));\n });\n });\n}), _Object$setPrototypeO), AsyncIteratorPrototype);\nvar createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) 
{\n var _Object$create;\n var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, {\n value: stream,\n writable: true\n }), _defineProperty(_Object$create, kLastResolve, {\n value: null,\n writable: true\n }), _defineProperty(_Object$create, kLastReject, {\n value: null,\n writable: true\n }), _defineProperty(_Object$create, kError, {\n value: null,\n writable: true\n }), _defineProperty(_Object$create, kEnded, {\n value: stream._readableState.endEmitted,\n writable: true\n }), _defineProperty(_Object$create, kHandlePromise, {\n value: function value(resolve, reject) {\n var data = iterator[kStream].read();\n if (data) {\n iterator[kLastPromise] = null;\n iterator[kLastResolve] = null;\n iterator[kLastReject] = null;\n resolve(createIterResult(data, false));\n } else {\n iterator[kLastResolve] = resolve;\n iterator[kLastReject] = reject;\n }\n },\n writable: true\n }), _Object$create));\n iterator[kLastPromise] = null;\n finished(stream, function (err) {\n if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') {\n var reject = iterator[kLastReject];\n // reject if we are waiting for data in the Promise\n // returned by next() and store the error\n if (reject !== null) {\n iterator[kLastPromise] = null;\n iterator[kLastResolve] = null;\n iterator[kLastReject] = null;\n reject(err);\n }\n iterator[kError] = err;\n return;\n }\n var resolve = iterator[kLastResolve];\n if (resolve !== null) {\n iterator[kLastPromise] = null;\n iterator[kLastResolve] = null;\n iterator[kLastReject] = null;\n resolve(createIterResult(undefined, true));\n }\n iterator[kEnded] = true;\n });\n stream.on('readable', onReadable.bind(null, iterator));\n return iterator;\n};\nmodule.exports = createReadableStreamAsyncIterator;\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/async_iterator.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/buffer_list.js": +/*!**************************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/buffer_list.js ***! + \**************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("\n\nfunction ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); enumerableOnly && (symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; })), keys.push.apply(keys, symbols); } return keys; }\nfunction _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = null != arguments[i] ? arguments[i] : {}; i % 2 ? ownKeys(Object(source), !0).forEach(function (key) { _defineProperty(target, key, source[key]); }) : Object.getOwnPropertyDescriptors ? 
Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } return target; }\nfunction _defineProperty(obj, key, value) { key = _toPropertyKey(key); if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, _toPropertyKey(descriptor.key), descriptor); } }\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); Object.defineProperty(Constructor, \"prototype\", { writable: false }); return Constructor; }\nfunction _toPropertyKey(arg) { var key = _toPrimitive(arg, \"string\"); return typeof key === \"symbol\" ? key : String(key); }\nfunction _toPrimitive(input, hint) { if (typeof input !== \"object\" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || \"default\"); if (typeof res !== \"object\") return res; throw new TypeError(\"@@toPrimitive must return a primitive value.\"); } return (hint === \"string\" ? String : Number)(input); }\nvar _require = __webpack_require__(/*! buffer */ \"./node_modules/buffer/index.js\"),\n Buffer = _require.Buffer;\nvar _require2 = __webpack_require__(/*! 
util */ \"?ed1b\"),\n inspect = _require2.inspect;\nvar custom = inspect && inspect.custom || 'inspect';\nfunction copyBuffer(src, target, offset) {\n Buffer.prototype.copy.call(src, target, offset);\n}\nmodule.exports = /*#__PURE__*/function () {\n function BufferList() {\n _classCallCheck(this, BufferList);\n this.head = null;\n this.tail = null;\n this.length = 0;\n }\n _createClass(BufferList, [{\n key: \"push\",\n value: function push(v) {\n var entry = {\n data: v,\n next: null\n };\n if (this.length > 0) this.tail.next = entry;else this.head = entry;\n this.tail = entry;\n ++this.length;\n }\n }, {\n key: \"unshift\",\n value: function unshift(v) {\n var entry = {\n data: v,\n next: this.head\n };\n if (this.length === 0) this.tail = entry;\n this.head = entry;\n ++this.length;\n }\n }, {\n key: \"shift\",\n value: function shift() {\n if (this.length === 0) return;\n var ret = this.head.data;\n if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next;\n --this.length;\n return ret;\n }\n }, {\n key: \"clear\",\n value: function clear() {\n this.head = this.tail = null;\n this.length = 0;\n }\n }, {\n key: \"join\",\n value: function join(s) {\n if (this.length === 0) return '';\n var p = this.head;\n var ret = '' + p.data;\n while (p = p.next) ret += s + p.data;\n return ret;\n }\n }, {\n key: \"concat\",\n value: function concat(n) {\n if (this.length === 0) return Buffer.alloc(0);\n var ret = Buffer.allocUnsafe(n >>> 0);\n var p = this.head;\n var i = 0;\n while (p) {\n copyBuffer(p.data, ret, i);\n i += p.data.length;\n p = p.next;\n }\n return ret;\n }\n\n // Consumes a specified amount of bytes or characters from the buffered data.\n }, {\n key: \"consume\",\n value: function consume(n, hasStrings) {\n var ret;\n if (n < this.head.data.length) {\n // `slice` is the same for buffers and strings.\n ret = this.head.data.slice(0, n);\n this.head.data = this.head.data.slice(n);\n } else if (n === this.head.data.length) {\n // First chunk is a perfect match.\n ret = this.shift();\n } else {\n // Result spans more than one buffer.\n ret = hasStrings ? this._getString(n) : this._getBuffer(n);\n }\n return ret;\n }\n }, {\n key: \"first\",\n value: function first() {\n return this.head.data;\n }\n\n // Consumes a specified amount of characters from the buffered data.\n }, {\n key: \"_getString\",\n value: function _getString(n) {\n var p = this.head;\n var c = 1;\n var ret = p.data;\n n -= ret.length;\n while (p = p.next) {\n var str = p.data;\n var nb = n > str.length ? str.length : n;\n if (nb === str.length) ret += str;else ret += str.slice(0, n);\n n -= nb;\n if (n === 0) {\n if (nb === str.length) {\n ++c;\n if (p.next) this.head = p.next;else this.head = this.tail = null;\n } else {\n this.head = p;\n p.data = str.slice(nb);\n }\n break;\n }\n ++c;\n }\n this.length -= c;\n return ret;\n }\n\n // Consumes a specified amount of bytes from the buffered data.\n }, {\n key: \"_getBuffer\",\n value: function _getBuffer(n) {\n var ret = Buffer.allocUnsafe(n);\n var p = this.head;\n var c = 1;\n p.data.copy(ret);\n n -= p.data.length;\n while (p = p.next) {\n var buf = p.data;\n var nb = n > buf.length ? 
buf.length : n;\n buf.copy(ret, ret.length - n, 0, nb);\n n -= nb;\n if (n === 0) {\n if (nb === buf.length) {\n ++c;\n if (p.next) this.head = p.next;else this.head = this.tail = null;\n } else {\n this.head = p;\n p.data = buf.slice(nb);\n }\n break;\n }\n ++c;\n }\n this.length -= c;\n return ret;\n }\n\n // Make sure the linked list only shows the minimal necessary information.\n }, {\n key: custom,\n value: function value(_, options) {\n return inspect(this, _objectSpread(_objectSpread({}, options), {}, {\n // Only inspect one level.\n depth: 0,\n // It should not recurse.\n customInspect: false\n }));\n }\n }]);\n return BufferList;\n}();\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/buffer_list.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/destroy.js": +/*!**********************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/destroy.js ***! + \**********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("/* provided dependency */ var process = __webpack_require__(/*! process/browser */ \"./node_modules/process/browser.js\");\n\n\n// undocumented cb() API, needed for core, not for public API\nfunction destroy(err, cb) {\n var _this = this;\n var readableDestroyed = this._readableState && this._readableState.destroyed;\n var writableDestroyed = this._writableState && this._writableState.destroyed;\n if (readableDestroyed || writableDestroyed) {\n if (cb) {\n cb(err);\n } else if (err) {\n if (!this._writableState) {\n process.nextTick(emitErrorNT, this, err);\n } else if (!this._writableState.errorEmitted) {\n this._writableState.errorEmitted = true;\n process.nextTick(emitErrorNT, this, err);\n }\n }\n return this;\n }\n\n // we set destroyed to true before firing error callbacks in order\n // to make it re-entrance safe in case destroy() is called within callbacks\n\n if (this._readableState) {\n this._readableState.destroyed = true;\n }\n\n // if this is a duplex stream mark the writable part as destroyed as well\n if (this._writableState) {\n this._writableState.destroyed = true;\n }\n this._destroy(err || null, function (err) {\n if (!cb && err) {\n if (!_this._writableState) {\n process.nextTick(emitErrorAndCloseNT, _this, err);\n } else if (!_this._writableState.errorEmitted) {\n _this._writableState.errorEmitted = true;\n process.nextTick(emitErrorAndCloseNT, _this, err);\n } else {\n process.nextTick(emitCloseNT, _this);\n }\n } else if (cb) {\n process.nextTick(emitCloseNT, _this);\n cb(err);\n } else {\n process.nextTick(emitCloseNT, _this);\n }\n });\n return this;\n}\nfunction emitErrorAndCloseNT(self, err) {\n emitErrorNT(self, err);\n emitCloseNT(self);\n}\nfunction emitCloseNT(self) {\n if (self._writableState && !self._writableState.emitClose) return;\n if (self._readableState && !self._readableState.emitClose) return;\n self.emit('close');\n}\nfunction undestroy() {\n if (this._readableState) {\n this._readableState.destroyed = false;\n this._readableState.reading = false;\n this._readableState.ended = false;\n this._readableState.endEmitted = false;\n }\n if (this._writableState) {\n this._writableState.destroyed = false;\n this._writableState.ended = false;\n this._writableState.ending = false;\n this._writableState.finalCalled = false;\n this._writableState.prefinished = false;\n this._writableState.finished = false;\n 
this._writableState.errorEmitted = false;\n }\n}\nfunction emitErrorNT(self, err) {\n self.emit('error', err);\n}\nfunction errorOrDestroy(stream, err) {\n // We have tests that rely on errors being emitted\n // in the same tick, so changing this is semver major.\n // For now when you opt-in to autoDestroy we allow\n // the error to be emitted nextTick. In a future\n // semver major update we should change the default to this.\n\n var rState = stream._readableState;\n var wState = stream._writableState;\n if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err);\n}\nmodule.exports = {\n destroy: destroy,\n undestroy: undestroy,\n errorOrDestroy: errorOrDestroy\n};\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/destroy.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/end-of-stream.js": +/*!****************************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/end-of-stream.js ***! + \****************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("// Ported from https://github.com/mafintosh/end-of-stream with\n// permission from the author, Mathias Buus (@mafintosh).\n\n\n\nvar ERR_STREAM_PREMATURE_CLOSE = (__webpack_require__(/*! ../../../errors */ \"./node_modules/readable-stream/errors-browser.js\").codes).ERR_STREAM_PREMATURE_CLOSE;\nfunction once(callback) {\n var called = false;\n return function () {\n if (called) return;\n called = true;\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n callback.apply(this, args);\n };\n}\nfunction noop() {}\nfunction isRequest(stream) {\n return stream.setHeader && typeof stream.abort === 'function';\n}\nfunction eos(stream, opts, callback) {\n if (typeof opts === 'function') return eos(stream, null, opts);\n if (!opts) opts = {};\n callback = once(callback || noop);\n var readable = opts.readable || opts.readable !== false && stream.readable;\n var writable = opts.writable || opts.writable !== false && stream.writable;\n var onlegacyfinish = function onlegacyfinish() {\n if (!stream.writable) onfinish();\n };\n var writableEnded = stream._writableState && stream._writableState.finished;\n var onfinish = function onfinish() {\n writable = false;\n writableEnded = true;\n if (!readable) callback.call(stream);\n };\n var readableEnded = stream._readableState && stream._readableState.endEmitted;\n var onend = function onend() {\n readable = false;\n readableEnded = true;\n if (!writable) callback.call(stream);\n };\n var onerror = function onerror(err) {\n callback.call(stream, err);\n };\n var onclose = function onclose() {\n var err;\n if (readable && !readableEnded) {\n if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();\n return callback.call(stream, err);\n }\n if (writable && !writableEnded) {\n if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();\n return callback.call(stream, err);\n }\n };\n var onrequest = function onrequest() {\n stream.req.on('finish', onfinish);\n };\n if (isRequest(stream)) {\n stream.on('complete', onfinish);\n stream.on('abort', onclose);\n if (stream.req) onrequest();else stream.on('request', onrequest);\n } else if (writable && 
!stream._writableState) {\n // legacy streams\n stream.on('end', onlegacyfinish);\n stream.on('close', onlegacyfinish);\n }\n stream.on('end', onend);\n stream.on('finish', onfinish);\n if (opts.error !== false) stream.on('error', onerror);\n stream.on('close', onclose);\n return function () {\n stream.removeListener('complete', onfinish);\n stream.removeListener('abort', onclose);\n stream.removeListener('request', onrequest);\n if (stream.req) stream.req.removeListener('finish', onfinish);\n stream.removeListener('end', onlegacyfinish);\n stream.removeListener('close', onlegacyfinish);\n stream.removeListener('finish', onfinish);\n stream.removeListener('end', onend);\n stream.removeListener('error', onerror);\n stream.removeListener('close', onclose);\n };\n}\nmodule.exports = eos;\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/end-of-stream.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/from-browser.js": +/*!***************************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/from-browser.js ***! + \***************************************************************************/ +/***/ ((module) => { + +eval("module.exports = function () {\n throw new Error('Readable.from is not available in the browser')\n};\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/from-browser.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/pipeline.js": +/*!***********************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/pipeline.js ***! + \***********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("// Ported from https://github.com/mafintosh/pump with\n// permission from the author, Mathias Buus (@mafintosh).\n\n\n\nvar eos;\nfunction once(callback) {\n var called = false;\n return function () {\n if (called) return;\n called = true;\n callback.apply(void 0, arguments);\n };\n}\nvar _require$codes = (__webpack_require__(/*! ../../../errors */ \"./node_modules/readable-stream/errors-browser.js\").codes),\n ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS,\n ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED;\nfunction noop(err) {\n // Rethrow the error if it exists to avoid swallowing it\n if (err) throw err;\n}\nfunction isRequest(stream) {\n return stream.setHeader && typeof stream.abort === 'function';\n}\nfunction destroyer(stream, reading, writing, callback) {\n callback = once(callback);\n var closed = false;\n stream.on('close', function () {\n closed = true;\n });\n if (eos === undefined) eos = __webpack_require__(/*! 
./end-of-stream */ \"./node_modules/readable-stream/lib/internal/streams/end-of-stream.js\");\n eos(stream, {\n readable: reading,\n writable: writing\n }, function (err) {\n if (err) return callback(err);\n closed = true;\n callback();\n });\n var destroyed = false;\n return function (err) {\n if (closed) return;\n if (destroyed) return;\n destroyed = true;\n\n // request.destroy just do .end - .abort is what we want\n if (isRequest(stream)) return stream.abort();\n if (typeof stream.destroy === 'function') return stream.destroy();\n callback(err || new ERR_STREAM_DESTROYED('pipe'));\n };\n}\nfunction call(fn) {\n fn();\n}\nfunction pipe(from, to) {\n return from.pipe(to);\n}\nfunction popCallback(streams) {\n if (!streams.length) return noop;\n if (typeof streams[streams.length - 1] !== 'function') return noop;\n return streams.pop();\n}\nfunction pipeline() {\n for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) {\n streams[_key] = arguments[_key];\n }\n var callback = popCallback(streams);\n if (Array.isArray(streams[0])) streams = streams[0];\n if (streams.length < 2) {\n throw new ERR_MISSING_ARGS('streams');\n }\n var error;\n var destroys = streams.map(function (stream, i) {\n var reading = i < streams.length - 1;\n var writing = i > 0;\n return destroyer(stream, reading, writing, function (err) {\n if (!error) error = err;\n if (err) destroys.forEach(call);\n if (reading) return;\n destroys.forEach(call);\n callback(error);\n });\n });\n return streams.reduce(pipe);\n}\nmodule.exports = pipeline;\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/pipeline.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/state.js": +/*!********************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/state.js ***! + \********************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +"use strict"; +eval("\n\nvar ERR_INVALID_OPT_VALUE = (__webpack_require__(/*! ../../../errors */ \"./node_modules/readable-stream/errors-browser.js\").codes).ERR_INVALID_OPT_VALUE;\nfunction highWaterMarkFrom(options, isDuplex, duplexKey) {\n return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null;\n}\nfunction getHighWaterMark(state, options, duplexKey, isDuplex) {\n var hwm = highWaterMarkFrom(options, isDuplex, duplexKey);\n if (hwm != null) {\n if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) {\n var name = isDuplex ? duplexKey : 'highWaterMark';\n throw new ERR_INVALID_OPT_VALUE(name, hwm);\n }\n return Math.floor(hwm);\n }\n\n // Default value\n return state.objectMode ? 16 : 16 * 1024;\n}\nmodule.exports = {\n getHighWaterMark: getHighWaterMark\n};\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/state.js?"); + +/***/ }), + +/***/ "./node_modules/readable-stream/lib/internal/streams/stream-browser.js": +/*!*****************************************************************************!*\ + !*** ./node_modules/readable-stream/lib/internal/streams/stream-browser.js ***! + \*****************************************************************************/ +/***/ ((module, __unused_webpack_exports, __webpack_require__) => { + +eval("module.exports = __webpack_require__(/*! 
events */ \"./node_modules/events/events.js\").EventEmitter;\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/readable-stream/lib/internal/streams/stream-browser.js?"); + +/***/ }), + +/***/ "./node_modules/safe-buffer/index.js": +/*!*******************************************!*\ + !*** ./node_modules/safe-buffer/index.js ***! + \*******************************************/ +/***/ ((module, exports, __webpack_require__) => { + +eval("/*! safe-buffer. MIT License. Feross Aboukhadijeh */\n/* eslint-disable node/no-deprecated-api */\nvar buffer = __webpack_require__(/*! buffer */ \"./node_modules/buffer/index.js\")\nvar Buffer = buffer.Buffer\n\n// alternative to using Object.keys for old browsers\nfunction copyProps (src, dst) {\n for (var key in src) {\n dst[key] = src[key]\n }\n}\nif (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) {\n module.exports = buffer\n} else {\n // Copy properties from require('buffer')\n copyProps(buffer, exports)\n exports.Buffer = SafeBuffer\n}\n\nfunction SafeBuffer (arg, encodingOrOffset, length) {\n return Buffer(arg, encodingOrOffset, length)\n}\n\nSafeBuffer.prototype = Object.create(Buffer.prototype)\n\n// Copy static methods from Buffer\ncopyProps(Buffer, SafeBuffer)\n\nSafeBuffer.from = function (arg, encodingOrOffset, length) {\n if (typeof arg === 'number') {\n throw new TypeError('Argument must not be a number')\n }\n return Buffer(arg, encodingOrOffset, length)\n}\n\nSafeBuffer.alloc = function (size, fill, encoding) {\n if (typeof size !== 'number') {\n throw new TypeError('Argument must be a number')\n }\n var buf = Buffer(size)\n if (fill !== undefined) {\n if (typeof encoding === 'string') {\n buf.fill(fill, encoding)\n } else {\n buf.fill(fill)\n }\n } else {\n buf.fill(0)\n }\n return buf\n}\n\nSafeBuffer.allocUnsafe = function (size) {\n if (typeof size !== 'number') {\n throw new TypeError('Argument must be a number')\n }\n return Buffer(size)\n}\n\nSafeBuffer.allocUnsafeSlow = function (size) {\n if (typeof size !== 'number') {\n throw new TypeError('Argument must be a number')\n }\n return buffer.SlowBuffer(size)\n}\n\n\n//# sourceURL=webpack://RSSParser/./node_modules/safe-buffer/index.js?"); + +/***/ }), + +/***/ "./node_modules/sax/lib/sax.js": +/*!*************************************!*\ + !*** ./node_modules/sax/lib/sax.js ***! + \*************************************/ +/***/ ((__unused_webpack_module, exports, __webpack_require__) => { + +eval("/* provided dependency */ var Buffer = __webpack_require__(/*! buffer */ \"./node_modules/buffer/index.js\")[\"Buffer\"];\n;(function (sax) { // wrapper for non-node envs\n sax.parser = function (strict, opt) { return new SAXParser(strict, opt) }\n sax.SAXParser = SAXParser\n sax.SAXStream = SAXStream\n sax.createStream = createStream\n\n // When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns.\n // When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)),\n // since that's the earliest that a buffer overrun could occur. This way, checks are\n // as rare as required, but as often as necessary to ensure never crossing this bound.\n // Furthermore, buffers are only tested at most once per write(), so passing a very\n // large string into write() might have undesirable effects, but this is manageable by\n // the caller, so it is assumed to be safe. 
Thus, a call to write() may, in the extreme\n // edge case, result in creating at most one complete copy of the string passed in.\n // Set to Infinity to have unlimited buffers.\n sax.MAX_BUFFER_LENGTH = 64 * 1024\n\n var buffers = [\n 'comment', 'sgmlDecl', 'textNode', 'tagName', 'doctype',\n 'procInstName', 'procInstBody', 'entity', 'attribName',\n 'attribValue', 'cdata', 'script'\n ]\n\n sax.EVENTS = [\n 'text',\n 'processinginstruction',\n 'sgmldeclaration',\n 'doctype',\n 'comment',\n 'opentagstart',\n 'attribute',\n 'opentag',\n 'closetag',\n 'opencdata',\n 'cdata',\n 'closecdata',\n 'error',\n 'end',\n 'ready',\n 'script',\n 'opennamespace',\n 'closenamespace'\n ]\n\n function SAXParser (strict, opt) {\n if (!(this instanceof SAXParser)) {\n return new SAXParser(strict, opt)\n }\n\n var parser = this\n clearBuffers(parser)\n parser.q = parser.c = ''\n parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH\n parser.opt = opt || {}\n parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags\n parser.looseCase = parser.opt.lowercase ? 'toLowerCase' : 'toUpperCase'\n parser.tags = []\n parser.closed = parser.closedRoot = parser.sawRoot = false\n parser.tag = parser.error = null\n parser.strict = !!strict\n parser.noscript = !!(strict || parser.opt.noscript)\n parser.state = S.BEGIN\n parser.strictEntities = parser.opt.strictEntities\n parser.ENTITIES = parser.strictEntities ? Object.create(sax.XML_ENTITIES) : Object.create(sax.ENTITIES)\n parser.attribList = []\n\n // namespaces form a prototype chain.\n // it always points at the current tag,\n // which protos to its parent tag.\n if (parser.opt.xmlns) {\n parser.ns = Object.create(rootNS)\n }\n\n // mostly just for error reporting\n parser.trackPosition = parser.opt.position !== false\n if (parser.trackPosition) {\n parser.position = parser.line = parser.column = 0\n }\n emit(parser, 'onready')\n }\n\n if (!Object.create) {\n Object.create = function (o) {\n function F () {}\n F.prototype = o\n var newf = new F()\n return newf\n }\n }\n\n if (!Object.keys) {\n Object.keys = function (o) {\n var a = []\n for (var i in o) if (o.hasOwnProperty(i)) a.push(i)\n return a\n }\n }\n\n function checkBufferLength (parser) {\n var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10)\n var maxActual = 0\n for (var i = 0, l = buffers.length; i < l; i++) {\n var len = parser[buffers[i]].length\n if (len > maxAllowed) {\n // Text/cdata nodes can get big, and since they're buffered,\n // we can get here under normal conditions.\n // Avoid issues by emitting the text node now,\n // so at least it won't get any bigger.\n switch (buffers[i]) {\n case 'textNode':\n closeText(parser)\n break\n\n case 'cdata':\n emitNode(parser, 'oncdata', parser.cdata)\n parser.cdata = ''\n break\n\n case 'script':\n emitNode(parser, 'onscript', parser.script)\n parser.script = ''\n break\n\n default:\n error(parser, 'Max buffer length exceeded: ' + buffers[i])\n }\n }\n maxActual = Math.max(maxActual, len)\n }\n // schedule the next check for the earliest possible buffer overrun.\n var m = sax.MAX_BUFFER_LENGTH - maxActual\n parser.bufferCheckPosition = m + parser.position\n }\n\n function clearBuffers (parser) {\n for (var i = 0, l = buffers.length; i < l; i++) {\n parser[buffers[i]] = ''\n }\n }\n\n function flushBuffers (parser) {\n closeText(parser)\n if (parser.cdata !== '') {\n emitNode(parser, 'oncdata', parser.cdata)\n parser.cdata = ''\n }\n if (parser.script !== '') {\n emitNode(parser, 'onscript', parser.script)\n parser.script = ''\n }\n }\n\n 
SAXParser.prototype = {\n end: function () { end(this) },\n write: write,\n resume: function () { this.error = null; return this },\n close: function () { return this.write(null) },\n flush: function () { flushBuffers(this) }\n }\n\n var Stream\n try {\n Stream = (__webpack_require__(/*! stream */ \"./node_modules/stream-browserify/index.js\").Stream)\n } catch (ex) {\n Stream = function () {}\n }\n if (!Stream) Stream = function () {}\n\n var streamWraps = sax.EVENTS.filter(function (ev) {\n return ev !== 'error' && ev !== 'end'\n })\n\n function createStream (strict, opt) {\n return new SAXStream(strict, opt)\n }\n\n function SAXStream (strict, opt) {\n if (!(this instanceof SAXStream)) {\n return new SAXStream(strict, opt)\n }\n\n Stream.apply(this)\n\n this._parser = new SAXParser(strict, opt)\n this.writable = true\n this.readable = true\n\n var me = this\n\n this._parser.onend = function () {\n me.emit('end')\n }\n\n this._parser.onerror = function (er) {\n me.emit('error', er)\n\n // if didn't throw, then means error was handled.\n // go ahead and clear error, so we can write again.\n me._parser.error = null\n }\n\n this._decoder = null\n\n streamWraps.forEach(function (ev) {\n Object.defineProperty(me, 'on' + ev, {\n get: function () {\n return me._parser['on' + ev]\n },\n set: function (h) {\n if (!h) {\n me.removeAllListeners(ev)\n me._parser['on' + ev] = h\n return h\n }\n me.on(ev, h)\n },\n enumerable: true,\n configurable: false\n })\n })\n }\n\n SAXStream.prototype = Object.create(Stream.prototype, {\n constructor: {\n value: SAXStream\n }\n })\n\n SAXStream.prototype.write = function (data) {\n if (typeof Buffer === 'function' &&\n typeof Buffer.isBuffer === 'function' &&\n Buffer.isBuffer(data)) {\n if (!this._decoder) {\n var SD = (__webpack_require__(/*! string_decoder */ \"./node_modules/string_decoder/lib/string_decoder.js\").StringDecoder)\n this._decoder = new SD('utf8')\n }\n data = this._decoder.write(data)\n }\n\n this._parser.write(data.toString())\n this.emit('data', data)\n return true\n }\n\n SAXStream.prototype.end = function (chunk) {\n if (chunk && chunk.length) {\n this.write(chunk)\n }\n this._parser.end()\n return true\n }\n\n SAXStream.prototype.on = function (ev, handler) {\n var me = this\n if (!me._parser['on' + ev] && streamWraps.indexOf(ev) !== -1) {\n me._parser['on' + ev] = function () {\n var args = arguments.length === 1 ? [arguments[0]] : Array.apply(null, arguments)\n args.splice(0, 0, ev)\n me.emit.apply(me, args)\n }\n }\n\n return Stream.prototype.on.call(me, ev, handler)\n }\n\n // this really needs to be replaced with character classes.\n // XML allows all manner of ridiculous numbers and digits.\n var CDATA = '[CDATA['\n var DOCTYPE = 'DOCTYPE'\n var XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace'\n var XMLNS_NAMESPACE = 'http://www.w3.org/2000/xmlns/'\n var rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE }\n\n // http://www.w3.org/TR/REC-xml/#NT-NameStartChar\n // This implementation works on strings, a single character at a time\n // as such, it cannot ever support astral-plane characters (10000-EFFFF)\n // without a significant breaking change to either this parser, or the\n // JavaScript language. 
Implementation of an emoji-capable xml parser\n // is left as an exercise for the reader.\n var nameStart = /[:_A-Za-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD]/\n\n var nameBody = /[:_A-Za-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD\\u00B7\\u0300-\\u036F\\u203F-\\u2040.\\d-]/\n\n var entityStart = /[#:_A-Za-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD]/\n var entityBody = /[#:_A-Za-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD\\u00B7\\u0300-\\u036F\\u203F-\\u2040.\\d-]/\n\n function isWhitespace (c) {\n return c === ' ' || c === '\\n' || c === '\\r' || c === '\\t'\n }\n\n function isQuote (c) {\n return c === '\"' || c === '\\''\n }\n\n function isAttribEnd (c) {\n return c === '>' || isWhitespace(c)\n }\n\n function isMatch (regex, c) {\n return regex.test(c)\n }\n\n function notMatch (regex, c) {\n return !isMatch(regex, c)\n }\n\n var S = 0\n sax.STATE = {\n BEGIN: S++, // leading byte order mark or whitespace\n BEGIN_WHITESPACE: S++, // leading whitespace\n TEXT: S++, // general stuff\n TEXT_ENTITY: S++, // & and such.\n OPEN_WAKA: S++, // <\n SGML_DECL: S++, // \n SCRIPT: S++, //