(self["webpackChunk_JUPYTERLAB_CORE_OUTPUT"] = self["webpackChunk_JUPYTERLAB_CORE_OUTPUT"] || []).push([[4926,7061],{ /***/ 21857: /***/ ((module) => { "use strict"; var isMergeableObject = function isMergeableObject(value) { return isNonNullObject(value) && !isSpecial(value) }; function isNonNullObject(value) { return !!value && typeof value === 'object' } function isSpecial(value) { var stringValue = Object.prototype.toString.call(value); return stringValue === '[object RegExp]' || stringValue === '[object Date]' || isReactElement(value) } // see https://github.com/facebook/react/blob/b5ac963fb791d1298e7f396236383bc955f916c1/src/isomorphic/classic/element/ReactElement.js#L21-L25 var canUseSymbol = typeof Symbol === 'function' && Symbol.for; var REACT_ELEMENT_TYPE = canUseSymbol ? Symbol.for('react.element') : 0xeac7; function isReactElement(value) { return value.$$typeof === REACT_ELEMENT_TYPE } function emptyTarget(val) { return Array.isArray(val) ? [] : {} } function cloneUnlessOtherwiseSpecified(value, options) { return (options.clone !== false && options.isMergeableObject(value)) ? deepmerge(emptyTarget(value), value, options) : value } function defaultArrayMerge(target, source, options) { return target.concat(source).map(function(element) { return cloneUnlessOtherwiseSpecified(element, options) }) } function getMergeFunction(key, options) { if (!options.customMerge) { return deepmerge } var customMerge = options.customMerge(key); return typeof customMerge === 'function' ? customMerge : deepmerge } function getEnumerableOwnPropertySymbols(target) { return Object.getOwnPropertySymbols ? Object.getOwnPropertySymbols(target).filter(function(symbol) { return Object.propertyIsEnumerable.call(target, symbol) }) : [] } function getKeys(target) { return Object.keys(target).concat(getEnumerableOwnPropertySymbols(target)) } function propertyIsOnObject(object, property) { try { return property in object } catch(_) { return false } } // Protects from prototype poisoning and unexpected merging up the prototype chain. function propertyIsUnsafe(target, key) { return propertyIsOnObject(target, key) // Properties are safe to merge if they don't exist in the target yet, && !(Object.hasOwnProperty.call(target, key) // unsafe if they exist up the prototype chain, && Object.propertyIsEnumerable.call(target, key)) // and also unsafe if they're nonenumerable. } function mergeObject(target, source, options) { var destination = {}; if (options.isMergeableObject(target)) { getKeys(target).forEach(function(key) { destination[key] = cloneUnlessOtherwiseSpecified(target[key], options); }); } getKeys(source).forEach(function(key) { if (propertyIsUnsafe(target, key)) { return } if (propertyIsOnObject(target, key) && options.isMergeableObject(source[key])) { destination[key] = getMergeFunction(key, options)(target[key], source[key], options); } else { destination[key] = cloneUnlessOtherwiseSpecified(source[key], options); } }); return destination } function deepmerge(target, source, options) { options = options || {}; options.arrayMerge = options.arrayMerge || defaultArrayMerge; options.isMergeableObject = options.isMergeableObject || isMergeableObject; // cloneUnlessOtherwiseSpecified is added to `options` so that custom arrayMerge() // implementations can use it. The caller may not replace it. 
options.cloneUnlessOtherwiseSpecified = cloneUnlessOtherwiseSpecified; var sourceIsArray = Array.isArray(source); var targetIsArray = Array.isArray(target); var sourceAndTargetTypesMatch = sourceIsArray === targetIsArray; if (!sourceAndTargetTypesMatch) { return cloneUnlessOtherwiseSpecified(source, options) } else if (sourceIsArray) { return options.arrayMerge(target, source, options) } else { return mergeObject(target, source, options) } } deepmerge.all = function deepmergeAll(array, options) { if (!Array.isArray(array)) { throw new Error('first argument should be an array') } return array.reduce(function(prev, next) { return deepmerge(prev, next, options) }, {}) }; var deepmerge_1 = deepmerge; module.exports = deepmerge_1; /***/ }), /***/ 80289: /***/ ((__unused_webpack_module, exports) => { "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); exports.Doctype = exports.CDATA = exports.Tag = exports.Style = exports.Script = exports.Comment = exports.Directive = exports.Text = exports.Root = exports.isTag = exports.ElementType = void 0; /** Types of elements found in htmlparser2's DOM */ var ElementType; (function (ElementType) { /** Type for the root element of a document */ ElementType["Root"] = "root"; /** Type for Text */ ElementType["Text"] = "text"; /** Type for */ ElementType["Directive"] = "directive"; /** Type for */ ElementType["Comment"] = "comment"; /** Type for `. this.sequenceIndex = Number(c === CharCodes.Lt); } }; Tokenizer.prototype.stateCDATASequence = function (c) { if (c === Sequences.Cdata[this.sequenceIndex]) { if (++this.sequenceIndex === Sequences.Cdata.length) { this.state = State.InCommentLike; this.currentSequence = Sequences.CdataEnd; this.sequenceIndex = 0; this.sectionStart = this.index + 1; } } else { this.sequenceIndex = 0; this.state = State.InDeclaration; this.stateInDeclaration(c); // Reconsume the character } }; /** * When we wait for one specific character, we can speed things up * by skipping through the buffer until we find it. * * @returns Whether the character was found. */ Tokenizer.prototype.fastForwardTo = function (c) { while (++this.index < this.buffer.length + this.offset) { if (this.buffer.charCodeAt(this.index - this.offset) === c) { return true; } } /* * We increment the index at the end of the `parse` loop, * so set it to `buffer.length - 1` here. * * TODO: Refactor `parse` to increment index before calling states. */ this.index = this.buffer.length + this.offset - 1; return false; }; /** * Comments and CDATA end with `-->` and `]]>`. * * Their common qualities are: * - Their end sequences have a distinct character they start with. * - That character is then repeated, so we have to check multiple repeats. * - All characters but the start character of the sequence can be skipped. */ Tokenizer.prototype.stateInCommentLike = function (c) { if (c === this.currentSequence[this.sequenceIndex]) { if (++this.sequenceIndex === this.currentSequence.length) { if (this.currentSequence === Sequences.CdataEnd) { this.cbs.oncdata(this.sectionStart, this.index, 2); } else { this.cbs.oncomment(this.sectionStart, this.index, 2); } this.sequenceIndex = 0; this.sectionStart = this.index + 1; this.state = State.Text; } } else if (this.sequenceIndex === 0) { // Fast-forward to the first character of the sequence if (this.fastForwardTo(this.currentSequence[0])) { this.sequenceIndex = 1; } } else if (c !== this.currentSequence[this.sequenceIndex - 1]) { // Allow long sequences, eg. 
--->, ]]]> this.sequenceIndex = 0; } }; /** * HTML only allows ASCII alpha characters (a-z and A-Z) at the beginning of a tag name. * * XML allows a lot more characters here (@see https://www.w3.org/TR/REC-xml/#NT-NameStartChar). * We allow anything that wouldn't end the tag. */ Tokenizer.prototype.isTagStartChar = function (c) { return this.xmlMode ? !isEndOfTagSection(c) : isASCIIAlpha(c); }; Tokenizer.prototype.startSpecial = function (sequence, offset) { this.isSpecial = true; this.currentSequence = sequence; this.sequenceIndex = offset; this.state = State.SpecialStartSequence; }; Tokenizer.prototype.stateBeforeTagName = function (c) { if (c === CharCodes.ExclamationMark) { this.state = State.BeforeDeclaration; this.sectionStart = this.index + 1; } else if (c === CharCodes.Questionmark) { this.state = State.InProcessingInstruction; this.sectionStart = this.index + 1; } else if (this.isTagStartChar(c)) { var lower = c | 0x20; this.sectionStart = this.index; if (!this.xmlMode && lower === Sequences.TitleEnd[2]) { this.startSpecial(Sequences.TitleEnd, 3); } else { this.state = !this.xmlMode && lower === Sequences.ScriptEnd[2] ? State.BeforeSpecialS : State.InTagName; } } else if (c === CharCodes.Slash) { this.state = State.BeforeClosingTagName; } else { this.state = State.Text; this.stateText(c); } }; Tokenizer.prototype.stateInTagName = function (c) { if (isEndOfTagSection(c)) { this.cbs.onopentagname(this.sectionStart, this.index); this.sectionStart = -1; this.state = State.BeforeAttributeName; this.stateBeforeAttributeName(c); } }; Tokenizer.prototype.stateBeforeClosingTagName = function (c) { if (isWhitespace(c)) { // Ignore } else if (c === CharCodes.Gt) { this.state = State.Text; } else { this.state = this.isTagStartChar(c) ? State.InClosingTagName : State.InSpecialComment; this.sectionStart = this.index; } }; Tokenizer.prototype.stateInClosingTagName = function (c) { if (c === CharCodes.Gt || isWhitespace(c)) { this.cbs.onclosetag(this.sectionStart, this.index); this.sectionStart = -1; this.state = State.AfterClosingTagName; this.stateAfterClosingTagName(c); } }; Tokenizer.prototype.stateAfterClosingTagName = function (c) { // Skip everything until ">" if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) { this.state = State.Text; this.baseState = State.Text; this.sectionStart = this.index + 1; } }; Tokenizer.prototype.stateBeforeAttributeName = function (c) { if (c === CharCodes.Gt) { this.cbs.onopentagend(this.index); if (this.isSpecial) { this.state = State.InSpecialTag; this.sequenceIndex = 0; } else { this.state = State.Text; } this.baseState = this.state; this.sectionStart = this.index + 1; } else if (c === CharCodes.Slash) { this.state = State.InSelfClosingTag; } else if (!isWhitespace(c)) { this.state = State.InAttributeName; this.sectionStart = this.index; } }; Tokenizer.prototype.stateInSelfClosingTag = function (c) { if (c === CharCodes.Gt) { this.cbs.onselfclosingtag(this.index); this.state = State.Text; this.baseState = State.Text; this.sectionStart = this.index + 1; this.isSpecial = false; // Reset special state, in case of self-closing special tags } else if (!isWhitespace(c)) { this.state = State.BeforeAttributeName; this.stateBeforeAttributeName(c); } }; Tokenizer.prototype.stateInAttributeName = function (c) { if (c === CharCodes.Eq || isEndOfTagSection(c)) { this.cbs.onattribname(this.sectionStart, this.index); this.sectionStart = -1; this.state = State.AfterAttributeName; this.stateAfterAttributeName(c); } }; Tokenizer.prototype.stateAfterAttributeName = 
function (c) { if (c === CharCodes.Eq) { this.state = State.BeforeAttributeValue; } else if (c === CharCodes.Slash || c === CharCodes.Gt) { this.cbs.onattribend(QuoteType.NoValue, this.index); this.state = State.BeforeAttributeName; this.stateBeforeAttributeName(c); } else if (!isWhitespace(c)) { this.cbs.onattribend(QuoteType.NoValue, this.index); this.state = State.InAttributeName; this.sectionStart = this.index; } }; Tokenizer.prototype.stateBeforeAttributeValue = function (c) { if (c === CharCodes.DoubleQuote) { this.state = State.InAttributeValueDq; this.sectionStart = this.index + 1; } else if (c === CharCodes.SingleQuote) { this.state = State.InAttributeValueSq; this.sectionStart = this.index + 1; } else if (!isWhitespace(c)) { this.sectionStart = this.index; this.state = State.InAttributeValueNq; this.stateInAttributeValueNoQuotes(c); // Reconsume token } }; Tokenizer.prototype.handleInAttributeValue = function (c, quote) { if (c === quote || (!this.decodeEntities && this.fastForwardTo(quote))) { this.cbs.onattribdata(this.sectionStart, this.index); this.sectionStart = -1; this.cbs.onattribend(quote === CharCodes.DoubleQuote ? QuoteType.Double : QuoteType.Single, this.index); this.state = State.BeforeAttributeName; } else if (this.decodeEntities && c === CharCodes.Amp) { this.baseState = this.state; this.state = State.BeforeEntity; } }; Tokenizer.prototype.stateInAttributeValueDoubleQuotes = function (c) { this.handleInAttributeValue(c, CharCodes.DoubleQuote); }; Tokenizer.prototype.stateInAttributeValueSingleQuotes = function (c) { this.handleInAttributeValue(c, CharCodes.SingleQuote); }; Tokenizer.prototype.stateInAttributeValueNoQuotes = function (c) { if (isWhitespace(c) || c === CharCodes.Gt) { this.cbs.onattribdata(this.sectionStart, this.index); this.sectionStart = -1; this.cbs.onattribend(QuoteType.Unquoted, this.index); this.state = State.BeforeAttributeName; this.stateBeforeAttributeName(c); } else if (this.decodeEntities && c === CharCodes.Amp) { this.baseState = this.state; this.state = State.BeforeEntity; } }; Tokenizer.prototype.stateBeforeDeclaration = function (c) { if (c === CharCodes.OpeningSquareBracket) { this.state = State.CDATASequence; this.sequenceIndex = 0; } else { this.state = c === CharCodes.Dash ? State.BeforeComment : State.InDeclaration; } }; Tokenizer.prototype.stateInDeclaration = function (c) { if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) { this.cbs.ondeclaration(this.sectionStart, this.index); this.state = State.Text; this.sectionStart = this.index + 1; } }; Tokenizer.prototype.stateInProcessingInstruction = function (c) { if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) { this.cbs.onprocessinginstruction(this.sectionStart, this.index); this.state = State.Text; this.sectionStart = this.index + 1; } }; Tokenizer.prototype.stateBeforeComment = function (c) { if (c === CharCodes.Dash) { this.state = State.InCommentLike; this.currentSequence = Sequences.CommentEnd; // Allow short comments (eg. 
) this.sequenceIndex = 2; this.sectionStart = this.index + 1; } else { this.state = State.InDeclaration; } }; Tokenizer.prototype.stateInSpecialComment = function (c) { if (c === CharCodes.Gt || this.fastForwardTo(CharCodes.Gt)) { this.cbs.oncomment(this.sectionStart, this.index, 0); this.state = State.Text; this.sectionStart = this.index + 1; } }; Tokenizer.prototype.stateBeforeSpecialS = function (c) { var lower = c | 0x20; if (lower === Sequences.ScriptEnd[3]) { this.startSpecial(Sequences.ScriptEnd, 4); } else if (lower === Sequences.StyleEnd[3]) { this.startSpecial(Sequences.StyleEnd, 4); } else { this.state = State.InTagName; this.stateInTagName(c); // Consume the token again } }; Tokenizer.prototype.stateBeforeEntity = function (c) { // Start excess with 1 to include the '&' this.entityExcess = 1; this.entityResult = 0; if (c === CharCodes.Number) { this.state = State.BeforeNumericEntity; } else if (c === CharCodes.Amp) { // We have two `&` characters in a row. Stay in the current state. } else { this.trieIndex = 0; this.trieCurrent = this.entityTrie[0]; this.state = State.InNamedEntity; this.stateInNamedEntity(c); } }; Tokenizer.prototype.stateInNamedEntity = function (c) { this.entityExcess += 1; this.trieIndex = (0, decode_js_1.determineBranch)(this.entityTrie, this.trieCurrent, this.trieIndex + 1, c); if (this.trieIndex < 0) { this.emitNamedEntity(); this.index--; return; } this.trieCurrent = this.entityTrie[this.trieIndex]; var masked = this.trieCurrent & decode_js_1.BinTrieFlags.VALUE_LENGTH; // If the branch is a value, store it and continue if (masked) { // The mask is the number of bytes of the value, including the current byte. var valueLength = (masked >> 14) - 1; // If we have a legacy entity while parsing strictly, just skip the number of bytes if (!this.allowLegacyEntity() && c !== CharCodes.Semi) { this.trieIndex += valueLength; } else { // Add 1 as we have already incremented the excess var entityStart = this.index - this.entityExcess + 1; if (entityStart > this.sectionStart) { this.emitPartial(this.sectionStart, entityStart); } // If this is a surrogate pair, consume the next two bytes this.entityResult = this.trieIndex; this.trieIndex += valueLength; this.entityExcess = 0; this.sectionStart = this.index + 1; if (valueLength === 0) { this.emitNamedEntity(); } } } }; Tokenizer.prototype.emitNamedEntity = function () { this.state = this.baseState; if (this.entityResult === 0) { return; } var valueLength = (this.entityTrie[this.entityResult] & decode_js_1.BinTrieFlags.VALUE_LENGTH) >> 14; switch (valueLength) { case 1: { this.emitCodePoint(this.entityTrie[this.entityResult] & ~decode_js_1.BinTrieFlags.VALUE_LENGTH); break; } case 2: { this.emitCodePoint(this.entityTrie[this.entityResult + 1]); break; } case 3: { this.emitCodePoint(this.entityTrie[this.entityResult + 1]); this.emitCodePoint(this.entityTrie[this.entityResult + 2]); } } }; Tokenizer.prototype.stateBeforeNumericEntity = function (c) { if ((c | 0x20) === CharCodes.LowerX) { this.entityExcess++; this.state = State.InHexEntity; } else { this.state = State.InNumericEntity; this.stateInNumericEntity(c); } }; Tokenizer.prototype.emitNumericEntity = function (strict) { var entityStart = this.index - this.entityExcess - 1; var numberStart = entityStart + 2 + Number(this.state === State.InHexEntity); if (numberStart !== this.index) { // Emit leading data if any if (entityStart > this.sectionStart) { this.emitPartial(this.sectionStart, entityStart); } this.sectionStart = this.index + Number(strict); 
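// replaceCodePoint (from the entities decode helpers) swaps numeric references that
// HTML disallows (NUL, lone surrogates, values above 0x10FFFF, and the 0x80-0x9F
// block) for the code points the spec requires before they are emitted.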
this.emitCodePoint((0, decode_js_1.replaceCodePoint)(this.entityResult)); } this.state = this.baseState; }; Tokenizer.prototype.stateInNumericEntity = function (c) { if (c === CharCodes.Semi) { this.emitNumericEntity(true); } else if (isNumber(c)) { this.entityResult = this.entityResult * 10 + (c - CharCodes.Zero); this.entityExcess++; } else { if (this.allowLegacyEntity()) { this.emitNumericEntity(false); } else { this.state = this.baseState; } this.index--; } }; Tokenizer.prototype.stateInHexEntity = function (c) { if (c === CharCodes.Semi) { this.emitNumericEntity(true); } else if (isNumber(c)) { this.entityResult = this.entityResult * 16 + (c - CharCodes.Zero); this.entityExcess++; } else if (isHexDigit(c)) { this.entityResult = this.entityResult * 16 + ((c | 0x20) - CharCodes.LowerA + 10); this.entityExcess++; } else { if (this.allowLegacyEntity()) { this.emitNumericEntity(false); } else { this.state = this.baseState; } this.index--; } }; Tokenizer.prototype.allowLegacyEntity = function () { return (!this.xmlMode && (this.baseState === State.Text || this.baseState === State.InSpecialTag)); }; /** * Remove data that has already been consumed from the buffer. */ Tokenizer.prototype.cleanup = function () { // If we are inside of text or attributes, emit what we already have. if (this.running && this.sectionStart !== this.index) { if (this.state === State.Text || (this.state === State.InSpecialTag && this.sequenceIndex === 0)) { this.cbs.ontext(this.sectionStart, this.index); this.sectionStart = this.index; } else if (this.state === State.InAttributeValueDq || this.state === State.InAttributeValueSq || this.state === State.InAttributeValueNq) { this.cbs.onattribdata(this.sectionStart, this.index); this.sectionStart = this.index; } } }; Tokenizer.prototype.shouldContinue = function () { return this.index < this.buffer.length + this.offset && this.running; }; /** * Iterates through the buffer, calling the function corresponding to the current state. * * States that are more likely to be hit are higher up, as a performance improvement. 
*/ Tokenizer.prototype.parse = function () { while (this.shouldContinue()) { var c = this.buffer.charCodeAt(this.index - this.offset); switch (this.state) { case State.Text: { this.stateText(c); break; } case State.SpecialStartSequence: { this.stateSpecialStartSequence(c); break; } case State.InSpecialTag: { this.stateInSpecialTag(c); break; } case State.CDATASequence: { this.stateCDATASequence(c); break; } case State.InAttributeValueDq: { this.stateInAttributeValueDoubleQuotes(c); break; } case State.InAttributeName: { this.stateInAttributeName(c); break; } case State.InCommentLike: { this.stateInCommentLike(c); break; } case State.InSpecialComment: { this.stateInSpecialComment(c); break; } case State.BeforeAttributeName: { this.stateBeforeAttributeName(c); break; } case State.InTagName: { this.stateInTagName(c); break; } case State.InClosingTagName: { this.stateInClosingTagName(c); break; } case State.BeforeTagName: { this.stateBeforeTagName(c); break; } case State.AfterAttributeName: { this.stateAfterAttributeName(c); break; } case State.InAttributeValueSq: { this.stateInAttributeValueSingleQuotes(c); break; } case State.BeforeAttributeValue: { this.stateBeforeAttributeValue(c); break; } case State.BeforeClosingTagName: { this.stateBeforeClosingTagName(c); break; } case State.AfterClosingTagName: { this.stateAfterClosingTagName(c); break; } case State.BeforeSpecialS: { this.stateBeforeSpecialS(c); break; } case State.InAttributeValueNq: { this.stateInAttributeValueNoQuotes(c); break; } case State.InSelfClosingTag: { this.stateInSelfClosingTag(c); break; } case State.InDeclaration: { this.stateInDeclaration(c); break; } case State.BeforeDeclaration: { this.stateBeforeDeclaration(c); break; } case State.BeforeComment: { this.stateBeforeComment(c); break; } case State.InProcessingInstruction: { this.stateInProcessingInstruction(c); break; } case State.InNamedEntity: { this.stateInNamedEntity(c); break; } case State.BeforeEntity: { this.stateBeforeEntity(c); break; } case State.InHexEntity: { this.stateInHexEntity(c); break; } case State.InNumericEntity: { this.stateInNumericEntity(c); break; } default: { // `this._state === State.BeforeNumericEntity` this.stateBeforeNumericEntity(c); } } this.index++; } this.cleanup(); }; Tokenizer.prototype.finish = function () { if (this.state === State.InNamedEntity) { this.emitNamedEntity(); } // If there is remaining data, emit it in a reasonable way if (this.sectionStart < this.index) { this.handleTrailingData(); } this.cbs.onend(); }; /** Handle any trailing data. 
*/ Tokenizer.prototype.handleTrailingData = function () { var endIndex = this.buffer.length + this.offset; if (this.state === State.InCommentLike) { if (this.currentSequence === Sequences.CdataEnd) { this.cbs.oncdata(this.sectionStart, endIndex, 0); } else { this.cbs.oncomment(this.sectionStart, endIndex, 0); } } else if (this.state === State.InNumericEntity && this.allowLegacyEntity()) { this.emitNumericEntity(false); // All trailing data will have been consumed } else if (this.state === State.InHexEntity && this.allowLegacyEntity()) { this.emitNumericEntity(false); // All trailing data will have been consumed } else if (this.state === State.InTagName || this.state === State.BeforeAttributeName || this.state === State.BeforeAttributeValue || this.state === State.AfterAttributeName || this.state === State.InAttributeName || this.state === State.InAttributeValueSq || this.state === State.InAttributeValueDq || this.state === State.InAttributeValueNq || this.state === State.InClosingTagName) { /* * If we are currently in an opening or closing tag, us not calling the * respective callback signals that the tag should be ignored. */ } else { this.cbs.ontext(this.sectionStart, endIndex); } }; Tokenizer.prototype.emitPartial = function (start, endIndex) { if (this.baseState !== State.Text && this.baseState !== State.InSpecialTag) { this.cbs.onattribdata(start, endIndex); } else { this.cbs.ontext(start, endIndex); } }; Tokenizer.prototype.emitCodePoint = function (cp) { if (this.baseState !== State.Text && this.baseState !== State.InSpecialTag) { this.cbs.onattribentity(cp); } else { this.cbs.ontextentity(cp); } }; return Tokenizer; }()); exports["default"] = Tokenizer; //# sourceMappingURL=Tokenizer.js.map /***/ }), /***/ 62789: /***/ (function(__unused_webpack_module, exports, __webpack_require__) { "use strict"; var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { if (k2 === undefined) k2 = k; var desc = Object.getOwnPropertyDescriptor(m, k); if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { desc = { enumerable: true, get: function() { return m[k]; } }; } Object.defineProperty(o, k2, desc); }) : (function(o, m, k, k2) { if (k2 === undefined) k2 = k; o[k2] = m[k]; })); var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { Object.defineProperty(o, "default", { enumerable: true, value: v }); }) : function(o, v) { o["default"] = v; }); var __importStar = (this && this.__importStar) || function (mod) { if (mod && mod.__esModule) return mod; var result = {}; if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); __setModuleDefault(result, mod); return result; }; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? 
mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", ({ value: true })); exports.DomUtils = exports.parseFeed = exports.getFeed = exports.ElementType = exports.Tokenizer = exports.createDomStream = exports.parseDOM = exports.parseDocument = exports.DefaultHandler = exports.DomHandler = exports.Parser = void 0; var Parser_js_1 = __webpack_require__(81366); var Parser_js_2 = __webpack_require__(81366); Object.defineProperty(exports, "Parser", ({ enumerable: true, get: function () { return Parser_js_2.Parser; } })); var domhandler_1 = __webpack_require__(16920); var domhandler_2 = __webpack_require__(16920); Object.defineProperty(exports, "DomHandler", ({ enumerable: true, get: function () { return domhandler_2.DomHandler; } })); // Old name for DomHandler Object.defineProperty(exports, "DefaultHandler", ({ enumerable: true, get: function () { return domhandler_2.DomHandler; } })); // Helper methods /** * Parses the data, returns the resulting document. * * @param data The data that should be parsed. * @param options Optional options for the parser and DOM builder. */ function parseDocument(data, options) { var handler = new domhandler_1.DomHandler(undefined, options); new Parser_js_1.Parser(handler, options).end(data); return handler.root; } exports.parseDocument = parseDocument; /** * Parses data, returns an array of the root nodes. * * Note that the root nodes still have a `Document` node as their parent. * Use `parseDocument` to get the `Document` node instead. * * @param data The data that should be parsed. * @param options Optional options for the parser and DOM builder. * @deprecated Use `parseDocument` instead. */ function parseDOM(data, options) { return parseDocument(data, options).children; } exports.parseDOM = parseDOM; /** * Creates a parser instance, with an attached DOM handler. * * @param callback A callback that will be called once parsing has been completed. * @param options Optional options for the parser and DOM builder. * @param elementCallback An optional callback that will be called every time a tag has been completed inside of the DOM. */ function createDomStream(callback, options, elementCallback) { var handler = new domhandler_1.DomHandler(callback, options, elementCallback); return new Parser_js_1.Parser(handler, options); } exports.createDomStream = createDomStream; var Tokenizer_js_1 = __webpack_require__(21211); Object.defineProperty(exports, "Tokenizer", ({ enumerable: true, get: function () { return __importDefault(Tokenizer_js_1).default; } })); /* * All of the following exports exist for backwards-compatibility. * They should probably be removed eventually. */ exports.ElementType = __importStar(__webpack_require__(80289)); var domutils_1 = __webpack_require__(93335); var domutils_2 = __webpack_require__(93335); Object.defineProperty(exports, "getFeed", ({ enumerable: true, get: function () { return domutils_2.getFeed; } })); var parseFeedDefaultOptions = { xmlMode: true }; /** * Parse a feed. * * @param feed The feed that should be parsed, as a string. * @param options Optionally, options for parsing. When using this, you should set `xmlMode` to `true`. */ function parseFeed(feed, options) { if (options === void 0) { options = parseFeedDefaultOptions; } return (0, domutils_1.getFeed)(parseDOM(feed, options)); } exports.parseFeed = parseFeed; exports.DomUtils = __importStar(__webpack_require__(93335)); //# sourceMappingURL=index.js.map /***/ }), /***/ 88773: /***/ ((module) => { // This alphabet uses `A-Za-z0-9_-` symbols. 
// The order of characters is optimized for better gzip and brotli compression.
// References to the same file (works both for gzip and brotli):
// `'use`, `andom`, and `rict'`
// References to the brotli default dictionary:
// `-26T`, `1983`, `40px`, `75px`, `bush`, `jack`, `mind`, `very`, and `wolf`
let urlAlphabet = 'useandom-26T198340PX75pxJACKVERYMINDBUSHWOLF_GQZbfghjklqvwyzrict'
let customAlphabet = (alphabet, defaultSize = 21) => {
  return (size = defaultSize) => {
    let id = ''
    // A compact alternative for `for (var i = 0; i < step; i++)`.
    let i = size | 0
    while (i--) {
      // `| 0` is more compact and faster than `Math.floor()`.
      id += alphabet[(Math.random() * alphabet.length) | 0]
    }
    return id
  }
}
let nanoid = (size = 21) => {
  let id = ''
  // A compact alternative for `for (var i = 0; i < step; i++)`.
  let i = size | 0
  while (i--) {
    // `| 0` is more compact and faster than `Math.floor()`.
    id += urlAlphabet[(Math.random() * 64) | 0]
  }
  return id
}
module.exports = { nanoid, customAlphabet }

/***/ })

}]);
//# sourceMappingURL=4926.4dee15ed968871f6651b.js.map?v=4dee15ed968871f6651b