'use strict'; var fs$1 = require('node:fs/promises'); var path = require('node:path'); var fs = require('node:fs'); var process$1 = require('node:process'); var os = require('node:os'); var tty = require('node:tty'); var node_module = require('node:module'); var node_url = require('node:url'); var assert = require('node:assert'); var v8 = require('node:v8'); var node_util = require('node:util'); var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null; const VOID = -1; const PRIMITIVE = 0; const ARRAY = 1; const OBJECT = 2; const DATE = 3; const REGEXP = 4; const MAP = 5; const SET = 6; const ERROR = 7; const BIGINT = 8; // export const SYMBOL = 9; const env$1 = typeof self === 'object' ? self : globalThis; const deserializer = ($, _) => { const as = (out, index) => { $.set(index, out); return out; }; const unpair = index => { if ($.has(index)) return $.get(index); const [type, value] = _[index]; switch (type) { case PRIMITIVE: case VOID: return as(value, index); case ARRAY: { const arr = as([], index); for (const index of value) arr.push(unpair(index)); return arr; } case OBJECT: { const object = as({}, index); for (const [key, index] of value) object[unpair(key)] = unpair(index); return object; } case DATE: return as(new Date(value), index); case REGEXP: { const {source, flags} = value; return as(new RegExp(source, flags), index); } case MAP: { const map = as(new Map, index); for (const [key, index] of value) map.set(unpair(key), unpair(index)); return map; } case SET: { const set = as(new Set, index); for (const index of value) set.add(unpair(index)); return set; } case ERROR: { const {name, message} = value; return as(new env$1[name](message), index); } case BIGINT: return as(BigInt(value), index); case 'BigInt': return as(Object(BigInt(value)), index); } return as(new env$1[type](value), index); }; return unpair; }; /** * @typedef {Array} Record a type representation */ /** * Returns a deserialized value from a serialized array of Records. * @param {Record[]} serialized a previously serialized value. 
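* @example
* An illustrative round trip (not from the original docs; `serialize` is the
* companion helper defined further down in this bundle):
*
* ```js
* const records = serialize({when: new Date(0), tags: new Set(['a'])})
* const copy = deserialize(records)
* copy.when instanceof Date // => true
* copy.tags.has('a') // => true
* ```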
* @returns {any} */ const deserialize = serialized => deserializer(new Map, serialized)(0); const EMPTY = ''; const {toString: toString$2} = {}; const {keys: keys$1} = Object; const typeOf = value => { const type = typeof value; if (type !== 'object' || !value) return [PRIMITIVE, type]; const asString = toString$2.call(value).slice(8, -1); switch (asString) { case 'Array': return [ARRAY, EMPTY]; case 'Object': return [OBJECT, EMPTY]; case 'Date': return [DATE, EMPTY]; case 'RegExp': return [REGEXP, EMPTY]; case 'Map': return [MAP, EMPTY]; case 'Set': return [SET, EMPTY]; } if (asString.includes('Array')) return [ARRAY, asString]; if (asString.includes('Error')) return [ERROR, asString]; return [OBJECT, asString]; }; const shouldSkip = ([TYPE, type]) => ( TYPE === PRIMITIVE && (type === 'function' || type === 'symbol') ); const serializer = (strict, json, $, _) => { const as = (out, value) => { const index = _.push(out) - 1; $.set(value, index); return index; }; const pair = value => { if ($.has(value)) return $.get(value); let [TYPE, type] = typeOf(value); switch (TYPE) { case PRIMITIVE: { let entry = value; switch (type) { case 'bigint': TYPE = BIGINT; entry = value.toString(); break; case 'function': case 'symbol': if (strict) throw new TypeError('unable to serialize ' + type); entry = null; break; case 'undefined': return as([VOID], value); } return as([TYPE, entry], value); } case ARRAY: { if (type) return as([type, [...value]], value); const arr = []; const index = as([TYPE, arr], value); for (const entry of value) arr.push(pair(entry)); return index; } case OBJECT: { if (type) { switch (type) { case 'BigInt': return as([type, value.toString()], value); case 'Boolean': case 'Number': case 'String': return as([type, value.valueOf()], value); } } if (json && ('toJSON' in value)) return pair(value.toJSON()); const entries = []; const index = as([TYPE, entries], value); for (const key of keys$1(value)) { if (strict || !shouldSkip(typeOf(value[key]))) entries.push([pair(key), pair(value[key])]); } return index; } case DATE: return as([TYPE, value.toISOString()], value); case REGEXP: { const {source, flags} = value; return as([TYPE, {source, flags}], value); } case MAP: { const entries = []; const index = as([TYPE, entries], value); for (const [key, entry] of value) { if (strict || !(shouldSkip(typeOf(key)) || shouldSkip(typeOf(entry)))) entries.push([pair(key), pair(entry)]); } return index; } case SET: { const entries = []; const index = as([TYPE, entries], value); for (const entry of value) { if (strict || !shouldSkip(typeOf(entry))) entries.push(pair(entry)); } return index; } } const {message} = value; return as([TYPE, {name: type, message}], value); }; return pair; }; /** * @typedef {Array} Record a type representation */ /** * Returns an array of serialized Records. * @param {any} value a serializable value. * @param {{json?: boolean, lossy?: boolean}?} options an object with a `lossy` or `json` property that, * if `true`, will not throw errors on incompatible types, and behave more * like JSON stringify would behave. Symbol and Function will be discarded. * @returns {Record[]} */ const serialize = (value, {json, lossy} = {}) => { const _ = []; return serializer(!(json || lossy), !!json, new Map, _)(value), _; }; /** * @typedef {Array} Record a type representation */ /** * Returns an array of serialized Records. * @param {any} any a serializable value. 
* @param {{transfer?: any[], json?: boolean, lossy?: boolean}?} options an object with * a transfer option (ignored when polyfilled) and/or non standard fields that * fallback to the polyfill if present. * @returns {Record[]} */ var structuredClone$1 = typeof structuredClone === "function" ? /* c8 ignore start */ (any, options) => ( options && ('json' in options || 'lossy' in options) ? deserialize(serialize(any, options)) : structuredClone(any) ) : (any, options) => deserialize(serialize(any, options)); /** * Throw a given error. * * @param {Error|null|undefined} [error] * Maybe error. * @returns {asserts error is null|undefined} */ function bail(error) { if (error) { throw error } } function isPlainObject(value) { if (typeof value !== 'object' || value === null) { return false; } const prototype = Object.getPrototypeOf(value); return (prototype === null || prototype === Object.prototype || Object.getPrototypeOf(prototype) === null) && !(Symbol.toStringTag in value) && !(Symbol.iterator in value); } // To do: remove `void`s // To do: remove `null` from output of our APIs, allow it as user APIs. /** * @typedef {(error?: Error | null | undefined, ...output: Array) => void} Callback * Callback. * * @typedef {(...input: Array) => any} Middleware * Ware. * * @typedef Pipeline * Pipeline. * @property {Run} run * Run the pipeline. * @property {Use} use * Add middleware. * * @typedef {(...input: Array) => void} Run * Call all middleware. * * Calls `done` on completion with either an error or the output of the * last middleware. * * > 👉 **Note**: as the length of input defines whether async functions get a * > `next` function, * > it’s recommended to keep `input` at one value normally. * * @typedef {(fn: Middleware) => Pipeline} Use * Add middleware. */ /** * Create new middleware. * * @returns {Pipeline} * Pipeline. */ function trough() { /** @type {Array} */ const fns = []; /** @type {Pipeline} */ const pipeline = {run, use}; return pipeline /** @type {Run} */ function run(...values) { let middlewareIndex = -1; /** @type {Callback} */ const callback = values.pop(); if (typeof callback !== 'function') { throw new TypeError('Expected function as last argument, not ' + callback) } next(null, ...values); /** * Run the next `fn`, or we’re done. * * @param {Error | null | undefined} error * @param {Array} output */ function next(error, ...output) { const fn = fns[++middlewareIndex]; let index = -1; if (error) { callback(error); return } // Copy non-nullish input into values. while (++index < values.length) { if (output[index] === null || output[index] === undefined) { output[index] = values[index]; } } // Save the newly created `output` for the next call. values = output; // Next or done. if (fn) { wrap(fn, next)(...output); } else { callback(null, ...output); } } } /** @type {Use} */ function use(middelware) { if (typeof middelware !== 'function') { throw new TypeError( 'Expected `middelware` to be a function, not ' + middelware ) } fns.push(middelware); return pipeline } } /** * Wrap `middleware` into a uniform interface. * * You can pass all input to the resulting function. * `callback` is then called with the output of `middleware`. * * If `middleware` accepts more arguments than the later given in input, * an extra `done` function is passed to it after that input, * which must be called by `middleware`. * * The first value in `input` is the main input value. * All other input values are the rest input values. 
* The values given to `callback` are the input values, * merged with every non-nullish output value. * * * if `middleware` throws an error, * returns a promise that is rejected, * or calls the given `done` function with an error, * `callback` is called with that error * * if `middleware` returns a value or returns a promise that is resolved, * that value is the main output value * * if `middleware` calls `done`, * all non-nullish values except for the first one (the error) overwrite the * output values * * @param {Middleware} middleware * Function to wrap. * @param {Callback} callback * Callback called with the output of `middleware`. * @returns {Run} * Wrapped middleware. */ function wrap(middleware, callback) { /** @type {boolean} */ let called; return wrapped /** * Call `middleware`. * @this {any} * @param {Array} parameters * @returns {void} */ function wrapped(...parameters) { const fnExpectsCallback = middleware.length > parameters.length; /** @type {any} */ let result; if (fnExpectsCallback) { parameters.push(done); } try { result = middleware.apply(this, parameters); } catch (error) { const exception = /** @type {Error} */ (error); // Well, this is quite the pickle. // `middleware` received a callback and called it synchronously, but that // threw an error. // The only thing left to do is to throw the thing instead. if (fnExpectsCallback && called) { throw exception } return done(exception) } if (!fnExpectsCallback) { if (result && result.then && typeof result.then === 'function') { result.then(then, done); } else if (result instanceof Error) { done(result); } else { then(result); } } } /** * Call `callback`, only once. * * @type {Callback} */ function done(error, ...output) { if (!called) { called = true; callback(error, ...output); } } /** * Call `done` with one value. * * @param {any} [value] */ function then(value) { done(null, value); } } /** * @typedef {import('unist').Node} Node * @typedef {import('unist').Point} Point * @typedef {import('unist').Position} Position */ /** * @typedef NodeLike * @property {string} type * @property {PositionLike | null | undefined} [position] * * @typedef PointLike * @property {number | null | undefined} [line] * @property {number | null | undefined} [column] * @property {number | null | undefined} [offset] * * @typedef PositionLike * @property {PointLike | null | undefined} [start] * @property {PointLike | null | undefined} [end] */ /** * Serialize the positional info of a point, position (start and end points), * or node. * * @param {Node | NodeLike | Point | PointLike | Position | PositionLike | null | undefined} [value] * Node, position, or point. * @returns {string} * Pretty printed positional info of a node (`string`). * * In the format of a range `ls:cs-le:ce` (when given `node` or `position`) * or a point `l:c` (when given `point`), where `l` stands for line, `c` for * column, `s` for `start`, and `e` for end. * An empty string (`''`) is returned if the given value is neither `node`, * `position`, nor `point`. */ function stringifyPosition(value) { // Nothing. if (!value || typeof value !== 'object') { return '' } // Node. if ('position' in value || 'type' in value) { return position(value.position) } // Position. if ('start' in value || 'end' in value) { return position(value) } // Point. if ('line' in value || 'column' in value) { return point$1(value) } // ? 
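// Illustrative (not in the original source): a node or position stringifies
// to a range such as `1:1-2:3`, a point to `1:2`, and anything else falls
// through to the empty string returned below.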
return '' } /** * @param {Point | PointLike | null | undefined} point * @returns {string} */ function point$1(point) { return index(point && point.line) + ':' + index(point && point.column) } /** * @param {Position | PositionLike | null | undefined} pos * @returns {string} */ function position(pos) { return point$1(pos && pos.start) + '-' + point$1(pos && pos.end) } /** * @param {number | null | undefined} value * @returns {number} */ function index(value) { return value && typeof value === 'number' ? value : 1 } /** * @typedef {import('unist').Node} Node * @typedef {import('unist').Point} Point * @typedef {import('unist').Position} Position */ /** * Message. */ class VFileMessage extends Error { /** * Create a message for `reason`. * * > 🪦 **Note**: also has obsolete signatures. * * @overload * @param {string} reason * @param {Options | null | undefined} [options] * @returns * * @overload * @param {string} reason * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns * * @overload * @param {string} reason * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns * * @overload * @param {string} reason * @param {string | null | undefined} [origin] * @returns * * @overload * @param {Error | VFileMessage} cause * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns * * @overload * @param {Error | VFileMessage} cause * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns * * @overload * @param {Error | VFileMessage} cause * @param {string | null | undefined} [origin] * @returns * * @param {Error | VFileMessage | string} causeOrReason * Reason for message, should use markdown. * @param {Node | NodeLike | Options | Point | Position | string | null | undefined} [optionsOrParentOrPlace] * Configuration (optional). * @param {string | null | undefined} [origin] * Place in code where the message originates (example: * `'my-package:my-rule'` or `'my-rule'`). * @returns * Instance of `VFileMessage`. */ // eslint-disable-next-line complexity constructor(causeOrReason, optionsOrParentOrPlace, origin) { super(); if (typeof optionsOrParentOrPlace === 'string') { origin = optionsOrParentOrPlace; optionsOrParentOrPlace = undefined; } /** @type {string} */ let reason = ''; /** @type {Options} */ let options = {}; let legacyCause = false; if (optionsOrParentOrPlace) { // Point. if ( 'line' in optionsOrParentOrPlace && 'column' in optionsOrParentOrPlace ) { options = {place: optionsOrParentOrPlace}; } // Position. else if ( 'start' in optionsOrParentOrPlace && 'end' in optionsOrParentOrPlace ) { options = {place: optionsOrParentOrPlace}; } // Node. else if ('type' in optionsOrParentOrPlace) { options = { ancestors: [optionsOrParentOrPlace], place: optionsOrParentOrPlace.position }; } // Options. else { options = {...optionsOrParentOrPlace}; } } if (typeof causeOrReason === 'string') { reason = causeOrReason; } // Error. 
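// Not a string: an `Error` or `VFileMessage` was passed; reuse its `message`
// as the reason and keep it as `cause` (legacy signature), unless an explicit
// `cause` option was already given.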
else if (!options.cause && causeOrReason) { legacyCause = true; reason = causeOrReason.message; options.cause = causeOrReason; } if (!options.ruleId && !options.source && typeof origin === 'string') { const index = origin.indexOf(':'); if (index === -1) { options.ruleId = origin; } else { options.source = origin.slice(0, index); options.ruleId = origin.slice(index + 1); } } if (!options.place && options.ancestors && options.ancestors) { const parent = options.ancestors[options.ancestors.length - 1]; if (parent) { options.place = parent.position; } } const start = options.place && 'start' in options.place ? options.place.start : options.place; /* eslint-disable no-unused-expressions */ /** * Stack of ancestor nodes surrounding the message. * * @type {Array | undefined} */ this.ancestors = options.ancestors || undefined; /** * Original error cause of the message. * * @type {Error | undefined} */ this.cause = options.cause || undefined; /** * Starting column of message. * * @type {number | undefined} */ this.column = start ? start.column : undefined; /** * State of problem. * * * `true` — error, file not usable * * `false` — warning, change may be needed * * `undefined` — change likely not needed * * @type {boolean | null | undefined} */ this.fatal = undefined; /** * Path of a file (used throughout the `VFile` ecosystem). * * @type {string | undefined} */ this.file; // Field from `Error`. /** * Reason for message. * * @type {string} */ this.message = reason; /** * Starting line of error. * * @type {number | undefined} */ this.line = start ? start.line : undefined; // Field from `Error`. /** * Serialized positional info of message. * * On normal errors, this would be something like `ParseError`, buit in * `VFile` messages we use this space to show where an error happened. */ this.name = stringifyPosition(options.place) || '1:1'; /** * Place of message. * * @type {Point | Position | undefined} */ this.place = options.place || undefined; /** * Reason for message, should use markdown. * * @type {string} */ this.reason = this.message; /** * Category of message (example: `'my-rule'`). * * @type {string | undefined} */ this.ruleId = options.ruleId || undefined; /** * Namespace of message (example: `'my-package'`). * * @type {string | undefined} */ this.source = options.source || undefined; // Field from `Error`. /** * Stack of message. * * This is used by normal errors to show where something happened in * programming code, irrelevant for `VFile` messages, * * @type {string} */ this.stack = legacyCause && options.cause && typeof options.cause.stack === 'string' ? options.cause.stack : ''; // The following fields are “well known”. // Not standard. // Feel free to add other non-standard fields to your messages. /** * Specify the source value that’s being reported, which is deemed * incorrect. * * @type {string | undefined} */ this.actual; /** * Suggest acceptable values that can be used instead of `actual`. * * @type {Array | undefined} */ this.expected; /** * Long form description of the message (you should use markdown). * * @type {string | undefined} */ this.note; /** * Link to docs for the message. * * > 👉 **Note**: this must be an absolute URL that can be passed as `x` * > to `new URL(x)`. 
* * @type {string | undefined} */ this.url; /* eslint-enable no-unused-expressions */ } } VFileMessage.prototype.file = ''; VFileMessage.prototype.name = ''; VFileMessage.prototype.reason = ''; VFileMessage.prototype.message = ''; VFileMessage.prototype.stack = ''; VFileMessage.prototype.column = undefined; VFileMessage.prototype.line = undefined; VFileMessage.prototype.ancestors = undefined; VFileMessage.prototype.cause = undefined; VFileMessage.prototype.fatal = undefined; VFileMessage.prototype.place = undefined; VFileMessage.prototype.ruleId = undefined; VFileMessage.prototype.source = undefined; /** * Checks if a value has the shape of a WHATWG URL object. * * Using a symbol or instanceof would not be able to recognize URL objects * coming from other implementations (e.g. in Electron), so instead we are * checking some well known properties for a lack of a better test. * * We use `href` and `protocol` as they are the only properties that are * easy to retrieve and calculate due to the lazy nature of the getters. * * We check for auth attribute to distinguish legacy url instance with * WHATWG URL instance. * * @param {unknown} fileUrlOrPath * File path or URL. * @returns {fileUrlOrPath is URL} * Whether it’s a URL. */ // From: function isUrl(fileUrlOrPath) { return Boolean( fileUrlOrPath !== null && typeof fileUrlOrPath === 'object' && 'href' in fileUrlOrPath && fileUrlOrPath.href && 'protocol' in fileUrlOrPath && fileUrlOrPath.protocol && // @ts-expect-error: indexing is fine. fileUrlOrPath.auth === undefined ) } /** * @import {Node, Point, Position} from 'unist' * @import {Options as MessageOptions} from 'vfile-message' * @import {Compatible, Data, Map, Options, Value} from 'vfile' */ /** * Order of setting (least specific to most), we need this because otherwise * `{stem: 'a', path: '~/b.js'}` would throw, as a path is needed before a * stem can be set. */ const order = /** @type {const} */ ([ 'history', 'path', 'basename', 'stem', 'extname', 'dirname' ]); class VFile { /** * Create a new virtual file. * * `options` is treated as: * * * `string` or `Uint8Array` — `{value: options}` * * `URL` — `{path: options}` * * `VFile` — shallow copies its data over to the new file * * `object` — all fields are shallow copied over to the new file * * Path related fields are set in the following order (least specific to * most specific): `history`, `path`, `basename`, `stem`, `extname`, * `dirname`. * * You cannot set `dirname` or `extname` without setting either `history`, * `path`, `basename`, or `stem` too. * * @param {Compatible | null | undefined} [value] * File value. * @returns * New instance. */ constructor(value) { /** @type {Options | VFile} */ let options; if (!value) { options = {}; } else if (isUrl(value)) { options = {path: value}; } else if (typeof value === 'string' || isUint8Array$1(value)) { options = {value}; } else { options = value; } /* eslint-disable no-unused-expressions */ /** * Base of `path` (default: `process.cwd()` or `'/'` in browsers). * * @type {string} */ // Prevent calling `cwd` (which could be expensive) if it’s not needed; // the empty string will be overridden in the next block. this.cwd = 'cwd' in options ? '' : process$1.cwd(); /** * Place to store custom info (default: `{}`). * * It’s OK to store custom data directly on the file but moving it to * `data` is recommended. * * @type {Data} */ this.data = {}; /** * List of file paths the file moved between. * * The first is the original path and the last is the current path. 
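* For example (illustrative, not from the original docs): setting
* `file.path = 'a.md'` and later `file.path = 'b.md'` leaves `history` as
* `['a.md', 'b.md']`.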
* * @type {Array} */ this.history = []; /** * List of messages associated with the file. * * @type {Array} */ this.messages = []; /** * Raw value. * * @type {Value} */ this.value; // The below are non-standard, they are “well-known”. // As in, used in several tools. /** * Source map. * * This type is equivalent to the `RawSourceMap` type from the `source-map` * module. * * @type {Map | null | undefined} */ this.map; /** * Custom, non-string, compiled, representation. * * This is used by unified to store non-string results. * One example is when turning markdown into React nodes. * * @type {unknown} */ this.result; /** * Whether a file was saved to disk. * * This is used by vfile reporters. * * @type {boolean} */ this.stored; /* eslint-enable no-unused-expressions */ // Set path related properties in the correct order. let index = -1; while (++index < order.length) { const field = order[index]; // Note: we specifically use `in` instead of `hasOwnProperty` to accept // `vfile`s too. if ( field in options && options[field] !== undefined && options[field] !== null ) { // @ts-expect-error: TS doesn’t understand basic reality. this[field] = field === 'history' ? [...options[field]] : options[field]; } } /** @type {string} */ let field; // Set non-path related properties. for (field in options) { // @ts-expect-error: fine to set other things. if (!order.includes(field)) { // @ts-expect-error: fine to set other things. this[field] = options[field]; } } } /** * Get the basename (including extname) (example: `'index.min.js'`). * * @returns {string | undefined} * Basename. */ get basename() { return typeof this.path === 'string' ? path.basename(this.path) : undefined } /** * Set basename (including extname) (`'index.min.js'`). * * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` * on windows). * Cannot be nullified (use `file.path = file.dirname` instead). * * @param {string} basename * Basename. * @returns {undefined} * Nothing. */ set basename(basename) { assertNonEmpty(basename, 'basename'); assertPart(basename, 'basename'); this.path = path.join(this.dirname || '', basename); } /** * Get the parent path (example: `'~'`). * * @returns {string | undefined} * Dirname. */ get dirname() { return typeof this.path === 'string' ? path.dirname(this.path) : undefined } /** * Set the parent path (example: `'~'`). * * Cannot be set if there’s no `path` yet. * * @param {string | undefined} dirname * Dirname. * @returns {undefined} * Nothing. */ set dirname(dirname) { assertPath(this.basename, 'dirname'); this.path = path.join(dirname || '', this.basename); } /** * Get the extname (including dot) (example: `'.js'`). * * @returns {string | undefined} * Extname. */ get extname() { return typeof this.path === 'string' ? path.extname(this.path) : undefined } /** * Set the extname (including dot) (example: `'.js'`). * * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` * on windows). * Cannot be set if there’s no `path` yet. * * @param {string | undefined} extname * Extname. * @returns {undefined} * Nothing. */ set extname(extname) { assertPart(extname, 'extname'); assertPath(this.dirname, 'extname'); if (extname) { if (extname.codePointAt(0) !== 46 /* `.` */) { throw new Error('`extname` must start with `.`') } if (extname.includes('.', 1)) { throw new Error('`extname` cannot contain multiple dots') } } this.path = path.join(this.dirname, this.stem + (extname || '')); } /** * Get the full path (example: `'~/index.min.js'`). * * @returns {string} * Path. 
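* @example
* A small sketch of the path accessors defined below (illustrative):
*
* ```js
* const file = new VFile({path: '~/index.min.js'})
* file.basename // => 'index.min.js'
* file.stem // => 'index.min'
* file.extname // => '.js'
* file.basename = 'readme.md'
* file.path // => '~/readme.md'
* ```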
*/ get path() { return this.history[this.history.length - 1] } /** * Set the full path (example: `'~/index.min.js'`). * * Cannot be nullified. * You can set a file URL (a `URL` object with a `file:` protocol) which will * be turned into a path with `url.fileURLToPath`. * * @param {URL | string} path * Path. * @returns {undefined} * Nothing. */ set path(path) { if (isUrl(path)) { path = node_url.fileURLToPath(path); } assertNonEmpty(path, 'path'); if (this.path !== path) { this.history.push(path); } } /** * Get the stem (basename w/o extname) (example: `'index.min'`). * * @returns {string | undefined} * Stem. */ get stem() { return typeof this.path === 'string' ? path.basename(this.path, this.extname) : undefined } /** * Set the stem (basename w/o extname) (example: `'index.min'`). * * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` * on windows). * Cannot be nullified (use `file.path = file.dirname` instead). * * @param {string} stem * Stem. * @returns {undefined} * Nothing. */ set stem(stem) { assertNonEmpty(stem, 'stem'); assertPart(stem, 'stem'); this.path = path.join(this.dirname || '', stem + (this.extname || '')); } // Normal prototypal methods. /** * Create a fatal message for `reason` associated with the file. * * The `fatal` field of the message is set to `true` (error; file not usable) * and the `file` field is set to the current file path. * The message is added to the `messages` field on `file`. * * > 🪦 **Note**: also has obsolete signatures. * * @overload * @param {string} reason * @param {MessageOptions | null | undefined} [options] * @returns {never} * * @overload * @param {string} reason * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns {never} * * @overload * @param {string} reason * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns {never} * * @overload * @param {string} reason * @param {string | null | undefined} [origin] * @returns {never} * * @overload * @param {Error | VFileMessage} cause * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns {never} * * @overload * @param {Error | VFileMessage} cause * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns {never} * * @overload * @param {Error | VFileMessage} cause * @param {string | null | undefined} [origin] * @returns {never} * * @param {Error | VFileMessage | string} causeOrReason * Reason for message, should use markdown. * @param {Node | NodeLike | MessageOptions | Point | Position | string | null | undefined} [optionsOrParentOrPlace] * Configuration (optional). * @param {string | null | undefined} [origin] * Place in code where the message originates (example: * `'my-package:my-rule'` or `'my-rule'`). * @returns {never} * Never. * @throws {VFileMessage} * Message. */ fail(causeOrReason, optionsOrParentOrPlace, origin) { // @ts-expect-error: the overloads are fine. const message = this.message(causeOrReason, optionsOrParentOrPlace, origin); message.fatal = true; throw message } /** * Create an info message for `reason` associated with the file. * * The `fatal` field of the message is set to `undefined` (info; change * likely not needed) and the `file` field is set to the current file path. * The message is added to the `messages` field on `file`. * * > 🪦 **Note**: also has obsolete signatures. 
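* @example
* A brief sketch (illustrative):
*
* ```js
* const file = new VFile({path: 'example.md'})
* file.info('reformat this heading').fatal // => undefined
* ```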
* * @overload * @param {string} reason * @param {MessageOptions | null | undefined} [options] * @returns {VFileMessage} * * @overload * @param {string} reason * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {string} reason * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {string} reason * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {Error | VFileMessage} cause * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {Error | VFileMessage} cause * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {Error | VFileMessage} cause * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @param {Error | VFileMessage | string} causeOrReason * Reason for message, should use markdown. * @param {Node | NodeLike | MessageOptions | Point | Position | string | null | undefined} [optionsOrParentOrPlace] * Configuration (optional). * @param {string | null | undefined} [origin] * Place in code where the message originates (example: * `'my-package:my-rule'` or `'my-rule'`). * @returns {VFileMessage} * Message. */ info(causeOrReason, optionsOrParentOrPlace, origin) { // @ts-expect-error: the overloads are fine. const message = this.message(causeOrReason, optionsOrParentOrPlace, origin); message.fatal = undefined; return message } /** * Create a message for `reason` associated with the file. * * The `fatal` field of the message is set to `false` (warning; change may be * needed) and the `file` field is set to the current file path. * The message is added to the `messages` field on `file`. * * > 🪦 **Note**: also has obsolete signatures. * * @overload * @param {string} reason * @param {MessageOptions | null | undefined} [options] * @returns {VFileMessage} * * @overload * @param {string} reason * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {string} reason * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {string} reason * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {Error | VFileMessage} cause * @param {Node | NodeLike | null | undefined} parent * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {Error | VFileMessage} cause * @param {Point | Position | null | undefined} place * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @overload * @param {Error | VFileMessage} cause * @param {string | null | undefined} [origin] * @returns {VFileMessage} * * @param {Error | VFileMessage | string} causeOrReason * Reason for message, should use markdown. * @param {Node | NodeLike | MessageOptions | Point | Position | string | null | undefined} [optionsOrParentOrPlace] * Configuration (optional). * @param {string | null | undefined} [origin] * Place in code where the message originates (example: * `'my-package:my-rule'` or `'my-rule'`). * @returns {VFileMessage} * Message. 
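* @example
* A short sketch (the `source`/`ruleId` names are placeholders):
*
* ```js
* const file = new VFile({path: 'example.md'})
* const message = file.message('`braavo` is misspelt', {
*   place: {line: 1, column: 8},
*   ruleId: 'typo',
*   source: 'spell'
* })
* message.fatal // => false
* file.messages.length // => 1
* ```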
*/ message(causeOrReason, optionsOrParentOrPlace, origin) { const message = new VFileMessage( // @ts-expect-error: the overloads are fine. causeOrReason, optionsOrParentOrPlace, origin ); if (this.path) { message.name = this.path + ':' + message.name; message.file = this.path; } message.fatal = false; this.messages.push(message); return message } /** * Serialize the file. * * > **Note**: which encodings are supported depends on the engine. * > For info on Node.js, see: * > . * * @param {string | null | undefined} [encoding='utf8'] * Character encoding to understand `value` as when it’s a `Uint8Array` * (default: `'utf-8'`). * @returns {string} * Serialized file. */ toString(encoding) { if (this.value === undefined) { return '' } if (typeof this.value === 'string') { return this.value } const decoder = new TextDecoder(encoding || undefined); return decoder.decode(this.value) } } /** * Assert that `part` is not a path (as in, does not contain `path.sep`). * * @param {string | null | undefined} part * File path part. * @param {string} name * Part name. * @returns {undefined} * Nothing. */ function assertPart(part, name) { if (part && part.includes(path.sep)) { throw new Error( '`' + name + '` cannot be a path: did not expect `' + path.sep + '`' ) } } /** * Assert that `part` is not empty. * * @param {string | undefined} part * Thing. * @param {string} name * Part name. * @returns {asserts part is string} * Nothing. */ function assertNonEmpty(part, name) { if (!part) { throw new Error('`' + name + '` cannot be empty') } } /** * Assert `path` exists. * * @param {string | undefined} path * Path. * @param {string} name * Dependency name. * @returns {asserts path is string} * Nothing. */ function assertPath(path, name) { if (!path) { throw new Error('Setting `' + name + '` requires `path` to be set too') } } /** * Assert `value` is an `Uint8Array`. * * @param {unknown} value * thing. * @returns {value is Uint8Array} * Whether `value` is an `Uint8Array`. */ function isUint8Array$1(value) { return Boolean( value && typeof value === 'object' && 'byteLength' in value && 'byteOffset' in value ) } const CallableInstance = /** * @type {new , Result>(property: string | symbol) => (...parameters: Parameters) => Result} */ ( /** @type {unknown} */ ( /** * @this {Function} * @param {string | symbol} property * @returns {(...parameters: Array) => unknown} */ function (property) { const self = this; const constr = self.constructor; const proto = /** @type {Record} */ ( // Prototypes do exist. // type-coverage:ignore-next-line constr.prototype ); const func = proto[property]; /** @type {(...parameters: Array) => unknown} */ const apply = function () { return func.apply(apply, arguments) }; Object.setPrototypeOf(apply, proto); const names = Object.getOwnPropertyNames(func); for (const p of names) { const descriptor = Object.getOwnPropertyDescriptor(func, p); if (descriptor) Object.defineProperty(apply, p, descriptor); } return apply } ) ); /** * @typedef {import('trough').Pipeline} Pipeline * * @typedef {import('unist').Node} Node * * @typedef {import('vfile').Compatible} Compatible * @typedef {import('vfile').Value} Value * * @typedef {import('../index.js').CompileResultMap} CompileResultMap * @typedef {import('../index.js').Data} Data */ // To do: next major: drop `Compiler`, `Parser`: prefer lowercase. // To do: we could start yielding `never` in TS when a parser is missing and // `parse` is called. // Currently, we allow directly setting `processor.parser`, which is untyped. 
const own$5 = {}.hasOwnProperty; /** * @template {Node | undefined} [ParseTree=undefined] * Output of `parse` (optional). * @template {Node | undefined} [HeadTree=undefined] * Input for `run` (optional). * @template {Node | undefined} [TailTree=undefined] * Output for `run` (optional). * @template {Node | undefined} [CompileTree=undefined] * Input of `stringify` (optional). * @template {CompileResults | undefined} [CompileResult=undefined] * Output of `stringify` (optional). * @extends {CallableInstance<[], Processor>} */ class Processor extends CallableInstance { /** * Create a processor. */ constructor() { // If `Processor()` is called (w/o new), `copy` is called instead. super('copy'); /** * Compiler to use (deprecated). * * @deprecated * Use `compiler` instead. * @type {( * Compiler< * CompileTree extends undefined ? Node : CompileTree, * CompileResult extends undefined ? CompileResults : CompileResult * > | * undefined * )} */ this.Compiler = undefined; /** * Parser to use (deprecated). * * @deprecated * Use `parser` instead. * @type {( * Parser | * undefined * )} */ this.Parser = undefined; // Note: the following fields are considered private. // However, they are needed for tests, and TSC generates an untyped // `private freezeIndex` field for, which trips `type-coverage` up. // Instead, we use `@deprecated` to visualize that they shouldn’t be used. /** * Internal list of configured plugins. * * @deprecated * This is a private internal property and should not be used. * @type {Array>>} */ this.attachers = []; /** * Compiler to use. * * @type {( * Compiler< * CompileTree extends undefined ? Node : CompileTree, * CompileResult extends undefined ? CompileResults : CompileResult * > | * undefined * )} */ this.compiler = undefined; /** * Internal state to track where we are while freezing. * * @deprecated * This is a private internal property and should not be used. * @type {number} */ this.freezeIndex = -1; /** * Internal state to track whether we’re frozen. * * @deprecated * This is a private internal property and should not be used. * @type {boolean | undefined} */ this.frozen = undefined; /** * Internal state. * * @deprecated * This is a private internal property and should not be used. * @type {Data} */ this.namespace = {}; /** * Parser to use. * * @type {( * Parser | * undefined * )} */ this.parser = undefined; /** * Internal list of configured transformers. * * @deprecated * This is a private internal property and should not be used. * @type {Pipeline} */ this.transformers = trough(); } /** * Copy a processor. * * @deprecated * This is a private internal method and should not be used. * @returns {Processor} * New *unfrozen* processor ({@link Processor `Processor`}) that is * configured to work the same as its ancestor. * When the descendant processor is configured in the future it does not * affect the ancestral processor. */ copy() { // Cast as the type parameters will be the same after attaching. const destination = /** @type {Processor} */ ( new Processor() ); let index = -1; while (++index < this.attachers.length) { const attacher = this.attachers[index]; destination.use(...attacher); } destination.data(structuredClone$1(this.namespace)); return destination } /** * Configure the processor with info available to all plugins. * Information is stored in an object. * * Typically, options can be given to a specific plugin, but sometimes it * makes sense to have information shared with several plugins. 
* For example, a list of HTML elements that are self-closing, which is * needed during all phases. * * > 👉 **Note**: setting information cannot occur on *frozen* processors. * > Call the processor first to create a new unfrozen processor. * * > 👉 **Note**: to register custom data in TypeScript, augment the * > {@link Data `Data`} interface. * * @example * This example show how to get and set info: * * ```js * import {unified} from 'unified' * * const processor = unified().data('alpha', 'bravo') * * processor.data('alpha') // => 'bravo' * * processor.data() // => {alpha: 'bravo'} * * processor.data({charlie: 'delta'}) * * processor.data() // => {charlie: 'delta'} * ``` * * @template {keyof Data} Key * * @overload * @returns {Data} * * @overload * @param {Data} dataset * @returns {Processor} * * @overload * @param {Key} key * @returns {Data[Key]} * * @overload * @param {Key} key * @param {Data[Key]} value * @returns {Processor} * * @param {Data | Key} [key] * Key to get or set, or entire dataset to set, or nothing to get the * entire dataset (optional). * @param {Data[Key]} [value] * Value to set (optional). * @returns {unknown} * The current processor when setting, the value at `key` when getting, or * the entire dataset when getting without key. */ data(key, value) { if (typeof key === 'string') { // Set `key`. if (arguments.length === 2) { assertUnfrozen('data', this.frozen); this.namespace[key] = value; return this } // Get `key`. return (own$5.call(this.namespace, key) && this.namespace[key]) || undefined } // Set space. if (key) { assertUnfrozen('data', this.frozen); this.namespace = key; return this } // Get space. return this.namespace } /** * Freeze a processor. * * Frozen processors are meant to be extended and not to be configured * directly. * * When a processor is frozen it cannot be unfrozen. * New processors working the same way can be created by calling the * processor. * * It’s possible to freeze processors explicitly by calling `.freeze()`. * Processors freeze automatically when `.parse()`, `.run()`, `.runSync()`, * `.stringify()`, `.process()`, or `.processSync()` are called. * * @returns {Processor} * The current processor. */ freeze() { if (this.frozen) { return this } // Cast so that we can type plugins easier. // Plugins are supposed to be usable on different processors, not just on // this exact processor. const self = /** @type {Processor} */ (/** @type {unknown} */ (this)); while (++this.freezeIndex < this.attachers.length) { const [attacher, ...options] = this.attachers[this.freezeIndex]; if (options[0] === false) { continue } if (options[0] === true) { options[0] = undefined; } const transformer = attacher.call(self, ...options); if (typeof transformer === 'function') { this.transformers.use(transformer); } } this.frozen = true; this.freezeIndex = Number.POSITIVE_INFINITY; return this } /** * Parse text to a syntax tree. * * > 👉 **Note**: `parse` freezes the processor if not already *frozen*. * * > 👉 **Note**: `parse` performs the parse phase, not the run phase or other * > phases. * * @param {Compatible | undefined} [file] * file to parse (optional); typically `string` or `VFile`; any value * accepted as `x` in `new VFile(x)`. * @returns {ParseTree extends undefined ? Node : ParseTree} * Syntax tree representing `file`. */ parse(file) { this.freeze(); const realFile = vfile(file); const parser = this.parser || this.Parser; assertParser('parse', parser); return parser(String(realFile), realFile) } /** * Process the given file as configured on the processor. 
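* @example
* A minimal sketch (the plugins are placeholders, not part of unified):
*
* ```js
* const file = await unified()
*   .use(someParserPlugin)
*   .use(someCompilerPlugin)
*   .process('input')
*
* console.log(String(file))
* ```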
* * > 👉 **Note**: `process` freezes the processor if not already *frozen*. * * > 👉 **Note**: `process` performs the parse, run, and stringify phases. * * @overload * @param {Compatible | undefined} file * @param {ProcessCallback>} done * @returns {undefined} * * @overload * @param {Compatible | undefined} [file] * @returns {Promise>} * * @param {Compatible | undefined} [file] * File (optional); typically `string` or `VFile`]; any value accepted as * `x` in `new VFile(x)`. * @param {ProcessCallback> | undefined} [done] * Callback (optional). * @returns {Promise | undefined} * Nothing if `done` is given. * Otherwise a promise, rejected with a fatal error or resolved with the * processed file. * * The parsed, transformed, and compiled value is available at * `file.value` (see note). * * > 👉 **Note**: unified typically compiles by serializing: most * > compilers return `string` (or `Uint8Array`). * > Some compilers, such as the one configured with * > [`rehype-react`][rehype-react], return other values (in this case, a * > React tree). * > If you’re using a compiler that doesn’t serialize, expect different * > result values. * > * > To register custom results in TypeScript, add them to * > {@link CompileResultMap `CompileResultMap`}. * * [rehype-react]: https://github.com/rehypejs/rehype-react */ process(file, done) { const self = this; this.freeze(); assertParser('process', this.parser || this.Parser); assertCompiler('process', this.compiler || this.Compiler); return done ? executor(undefined, done) : new Promise(executor) // Note: `void`s needed for TS. /** * @param {((file: VFileWithOutput) => undefined | void) | undefined} resolve * @param {(error: Error | undefined) => undefined | void} reject * @returns {undefined} */ function executor(resolve, reject) { const realFile = vfile(file); // Assume `ParseTree` (the result of the parser) matches `HeadTree` (the // input of the first transform). const parseTree = /** @type {HeadTree extends undefined ? Node : HeadTree} */ ( /** @type {unknown} */ (self.parse(realFile)) ); self.run(parseTree, realFile, function (error, tree, file) { if (error || !tree || !file) { return realDone(error) } // Assume `TailTree` (the output of the last transform) matches // `CompileTree` (the input of the compiler). const compileTree = /** @type {CompileTree extends undefined ? Node : CompileTree} */ ( /** @type {unknown} */ (tree) ); const compileResult = self.stringify(compileTree, file); if (looksLikeAValue(compileResult)) { file.value = compileResult; } else { file.result = compileResult; } realDone(error, /** @type {VFileWithOutput} */ (file)); }); /** * @param {Error | undefined} error * @param {VFileWithOutput | undefined} [file] * @returns {undefined} */ function realDone(error, file) { if (error || !file) { reject(error); } else if (resolve) { resolve(file); } else { done(undefined, file); } } } } /** * Process the given file as configured on the processor. * * An error is thrown if asynchronous transforms are configured. * * > 👉 **Note**: `processSync` freezes the processor if not already *frozen*. * * > 👉 **Note**: `processSync` performs the parse, run, and stringify phases. * * @param {Compatible | undefined} [file] * File (optional); typically `string` or `VFile`; any value accepted as * `x` in `new VFile(x)`. * @returns {VFileWithOutput} * The processed file. * * The parsed, transformed, and compiled value is available at * `file.value` (see note). 
* * > 👉 **Note**: unified typically compiles by serializing: most * > compilers return `string` (or `Uint8Array`). * > Some compilers, such as the one configured with * > [`rehype-react`][rehype-react], return other values (in this case, a * > React tree). * > If you’re using a compiler that doesn’t serialize, expect different * > result values. * > * > To register custom results in TypeScript, add them to * > {@link CompileResultMap `CompileResultMap`}. * * [rehype-react]: https://github.com/rehypejs/rehype-react */ processSync(file) { /** @type {boolean} */ let complete = false; /** @type {VFileWithOutput | undefined} */ let result; this.freeze(); assertParser('processSync', this.parser || this.Parser); assertCompiler('processSync', this.compiler || this.Compiler); this.process(file, realDone); assertDone('processSync', 'process', complete); return result /** * @type {ProcessCallback>} */ function realDone(error, file) { complete = true; bail(error); result = file; } } /** * Run *transformers* on a syntax tree. * * > 👉 **Note**: `run` freezes the processor if not already *frozen*. * * > 👉 **Note**: `run` performs the run phase, not other phases. * * @overload * @param {HeadTree extends undefined ? Node : HeadTree} tree * @param {RunCallback} done * @returns {undefined} * * @overload * @param {HeadTree extends undefined ? Node : HeadTree} tree * @param {Compatible | undefined} file * @param {RunCallback} done * @returns {undefined} * * @overload * @param {HeadTree extends undefined ? Node : HeadTree} tree * @param {Compatible | undefined} [file] * @returns {Promise} * * @param {HeadTree extends undefined ? Node : HeadTree} tree * Tree to transform and inspect. * @param {( * RunCallback | * Compatible * )} [file] * File associated with `node` (optional); any value accepted as `x` in * `new VFile(x)`. * @param {RunCallback} [done] * Callback (optional). * @returns {Promise | undefined} * Nothing if `done` is given. * Otherwise, a promise rejected with a fatal error or resolved with the * transformed tree. */ run(tree, file, done) { assertNode(tree); this.freeze(); const transformers = this.transformers; if (!done && typeof file === 'function') { done = file; file = undefined; } return done ? executor(undefined, done) : new Promise(executor) // Note: `void`s needed for TS. /** * @param {( * ((tree: TailTree extends undefined ? Node : TailTree) => undefined | void) | * undefined * )} resolve * @param {(error: Error) => undefined | void} reject * @returns {undefined} */ function executor(resolve, reject) { const realFile = vfile(file); transformers.run(tree, realFile, realDone); /** * @param {Error | undefined} error * @param {Node} outputTree * @param {VFile} file * @returns {undefined} */ function realDone(error, outputTree, file) { const resultingTree = /** @type {TailTree extends undefined ? Node : TailTree} */ ( outputTree || tree ); if (error) { reject(error); } else if (resolve) { resolve(resultingTree); } else { done(undefined, resultingTree, file); } } } } /** * Run *transformers* on a syntax tree. * * An error is thrown if asynchronous transforms are configured. * * > 👉 **Note**: `runSync` freezes the processor if not already *frozen*. * * > 👉 **Note**: `runSync` performs the run phase, not other phases. * * @param {HeadTree extends undefined ? Node : HeadTree} tree * Tree to transform and inspect. * @param {Compatible | undefined} [file] * File associated with `node` (optional); any value accepted as `x` in * `new VFile(x)`. * @returns {TailTree extends undefined ? 
Node : TailTree} * Transformed tree. */ runSync(tree, file) { /** @type {boolean} */ let complete = false; /** @type {(TailTree extends undefined ? Node : TailTree) | undefined} */ let result; this.run(tree, file, realDone); assertDone('runSync', 'run', complete); return result /** * @type {RunCallback} */ function realDone(error, tree) { bail(error); result = tree; complete = true; } } /** * Compile a syntax tree. * * > 👉 **Note**: `stringify` freezes the processor if not already *frozen*. * * > 👉 **Note**: `stringify` performs the stringify phase, not the run phase * > or other phases. * * @param {CompileTree extends undefined ? Node : CompileTree} tree * Tree to compile. * @param {Compatible | undefined} [file] * File associated with `node` (optional); any value accepted as `x` in * `new VFile(x)`. * @returns {CompileResult extends undefined ? Value : CompileResult} * Textual representation of the tree (see note). * * > 👉 **Note**: unified typically compiles by serializing: most compilers * > return `string` (or `Uint8Array`). * > Some compilers, such as the one configured with * > [`rehype-react`][rehype-react], return other values (in this case, a * > React tree). * > If you’re using a compiler that doesn’t serialize, expect different * > result values. * > * > To register custom results in TypeScript, add them to * > {@link CompileResultMap `CompileResultMap`}. * * [rehype-react]: https://github.com/rehypejs/rehype-react */ stringify(tree, file) { this.freeze(); const realFile = vfile(file); const compiler = this.compiler || this.Compiler; assertCompiler('stringify', compiler); assertNode(tree); return compiler(tree, realFile) } /** * Configure the processor to use a plugin, a list of usable values, or a * preset. * * If the processor is already using a plugin, the previous plugin * configuration is changed based on the options that are passed in. * In other words, the plugin is not added a second time. * * > 👉 **Note**: `use` cannot be called on *frozen* processors. * > Call the processor first to create a new unfrozen processor. * * @example * There are many ways to pass plugins to `.use()`. * This example gives an overview: * * ```js * import {unified} from 'unified' * * unified() * // Plugin with options: * .use(pluginA, {x: true, y: true}) * // Passing the same plugin again merges configuration (to `{x: true, y: false, z: true}`): * .use(pluginA, {y: false, z: true}) * // Plugins: * .use([pluginB, pluginC]) * // Two plugins, the second with options: * .use([pluginD, [pluginE, {}]]) * // Preset with plugins and settings: * .use({plugins: [pluginF, [pluginG, {}]], settings: {position: false}}) * // Settings only: * .use({settings: {position: false}}) * ``` * * @template {Array} [Parameters=[]] * @template {Node | string | undefined} [Input=undefined] * @template [Output=Input] * * @overload * @param {Preset | null | undefined} [preset] * @returns {Processor} * * @overload * @param {PluggableList} list * @returns {Processor} * * @overload * @param {Plugin} plugin * @param {...(Parameters | [boolean])} parameters * @returns {UsePlugin} * * @param {PluggableList | Plugin | Preset | null | undefined} value * Usable value. * @param {...unknown} parameters * Parameters, when a plugin is given as a usable value. * @returns {Processor} * Current processor. 
*/ use(value, ...parameters) { const attachers = this.attachers; const namespace = this.namespace; assertUnfrozen('use', this.frozen); if (value === null || value === undefined) ; else if (typeof value === 'function') { addPlugin(value, parameters); } else if (typeof value === 'object') { if (Array.isArray(value)) { addList(value); } else { addPreset(value); } } else { throw new TypeError('Expected usable value, not `' + value + '`') } return this /** * @param {Pluggable} value * @returns {undefined} */ function add(value) { if (typeof value === 'function') { addPlugin(value, []); } else if (typeof value === 'object') { if (Array.isArray(value)) { const [plugin, ...parameters] = /** @type {PluginTuple>} */ (value); addPlugin(plugin, parameters); } else { addPreset(value); } } else { throw new TypeError('Expected usable value, not `' + value + '`') } } /** * @param {Preset} result * @returns {undefined} */ function addPreset(result) { if (!('plugins' in result) && !('settings' in result)) { throw new Error( 'Expected usable value but received an empty preset, which is probably a mistake: presets typically come with `plugins` and sometimes with `settings`, but this has neither' ) } addList(result.plugins); if (result.settings) { namespace.settings = { ...namespace.settings, ...structuredClone$1(result.settings) }; } } /** * @param {PluggableList | null | undefined} plugins * @returns {undefined} */ function addList(plugins) { let index = -1; if (plugins === null || plugins === undefined) ; else if (Array.isArray(plugins)) { while (++index < plugins.length) { const thing = plugins[index]; add(thing); } } else { throw new TypeError('Expected a list of plugins, not `' + plugins + '`') } } /** * @param {Plugin} plugin * @param {Array} parameters * @returns {undefined} */ function addPlugin(plugin, parameters) { let index = -1; let entryIndex = -1; while (++index < attachers.length) { if (attachers[index][0] === plugin) { entryIndex = index; break } } if (entryIndex === -1) { attachers.push([plugin, ...parameters]); } // Only set if there was at least a `primary` value, otherwise we’d change // `arguments.length`. else if (parameters.length > 0) { let [primary, ...rest] = parameters; const currentPrimary = attachers[entryIndex][1]; if (isPlainObject(currentPrimary) && isPlainObject(primary)) { primary = structuredClone$1({...currentPrimary, ...primary}); } attachers[entryIndex] = [plugin, primary, ...rest]; } } } } // Note: this returns a *callable* instance. // That’s why it’s documented as a function. /** * Create a new processor. * * @example * This example shows how a new processor can be created (from `remark`) and linked * to **stdin**(4) and **stdout**(4). * * ```js * import process from 'node:process' * import concatStream from 'concat-stream' * import {remark} from 'remark' * * process.stdin.pipe( * concatStream(function (buf) { * process.stdout.write(String(remark().processSync(buf))) * }) * ) * ``` * * @returns * New *unfrozen* processor (`processor`). * * This processor is configured to work the same as its ancestor. * When the descendant processor is configured in the future it does not * affect the ancestral processor. */ const unified = new Processor().freeze(); /** * Assert a parser is available. * * @param {string} name * @param {unknown} value * @returns {asserts value is Parser} */ function assertParser(name, value) { if (typeof value !== 'function') { throw new TypeError('Cannot `' + name + '` without `parser`') } } /** * Assert a compiler is available. 
 *
 * @param {string} name
 * @param {unknown} value
 * @returns {asserts value is Compiler}
 */
function assertCompiler(name, value) {
  if (typeof value !== 'function') {
    throw new TypeError('Cannot `' + name + '` without `compiler`')
  }
}

/**
 * Assert the processor is not frozen.
 *
 * @param {string} name
 * @param {unknown} frozen
 * @returns {asserts frozen is false}
 */
function assertUnfrozen(name, frozen) {
  if (frozen) {
    throw new Error(
      'Cannot call `' + name + '` on a frozen processor.\nCreate a new processor first, by calling it: use `processor()` instead of `processor`.'
    )
  }
}

/**
 * Assert `node` is a unist node.
 *
 * @param {unknown} node
 * @returns {asserts node is Node}
 */
function assertNode(node) {
  // `isPlainObj` unfortunately uses `any` instead of `unknown`.
  // type-coverage:ignore-next-line
  if (!isPlainObject(node) || typeof node.type !== 'string') {
    throw new TypeError('Expected node, got `' + node + '`')
    // Fine.
  }
}

/**
 * Assert that `complete` is `true`.
 *
 * @param {string} name
 * @param {string} asyncName
 * @param {unknown} complete
 * @returns {asserts complete is true}
 */
function assertDone(name, asyncName, complete) {
  if (!complete) {
    throw new Error(
      '`' + name + '` finished async. Use `' + asyncName + '` instead'
    )
  }
}

/**
 * @param {Compatible | undefined} [value]
 * @returns {VFile}
 */
function vfile(value) {
  return looksLikeAVFile(value) ? value : new VFile(value)
}

/**
 * @param {Compatible | undefined} [value]
 * @returns {value is VFile}
 */
function looksLikeAVFile(value) {
  return Boolean(
    value &&
      typeof value === 'object' &&
      'message' in value &&
      'messages' in value
  )
}

/**
 * @param {unknown} [value]
 * @returns {value is Value}
 */
function looksLikeAValue(value) {
  return typeof value === 'string' || isUint8Array(value)
}

/**
 * Assert `value` is an `Uint8Array`.
 *
 * @param {unknown} value
 *   thing.
 * @returns {value is Uint8Array}
 *   Whether `value` is an `Uint8Array`.
 */
function isUint8Array(value) {
  return Boolean(
    value &&
      typeof value === 'object' &&
      'byteLength' in value &&
      'byteOffset' in value
  )
}

/**
 * @typedef {import('mdast').Nodes} Nodes
 *
 * @typedef Options
 *   Configuration (optional).
 * @property {boolean | null | undefined} [includeImageAlt=true]
 *   Whether to use `alt` for `image`s (default: `true`).
 * @property {boolean | null | undefined} [includeHtml=true]
 *   Whether to use `value` of HTML (default: `true`).
 */

/** @type {Options} */
const emptyOptions = {};

/**
 * Get the text content of a node or list of nodes.
 *
 * Prefers the node’s plain-text fields, otherwise serializes its children,
 * and if the given value is an array, serializes the nodes in it.
 *
 * @param {unknown} [value]
 *   Thing to serialize, typically `Node`.
 * @param {Options | null | undefined} [options]
 *   Configuration (optional).
 * @returns {string}
 *   Serialized `value`.
 */
function toString$1(value, options) {
  const settings = options || emptyOptions;
  const includeImageAlt =
    typeof settings.includeImageAlt === 'boolean'
      ? settings.includeImageAlt
      : true;
  const includeHtml =
    typeof settings.includeHtml === 'boolean' ? settings.includeHtml : true;
  return one(value, includeImageAlt, includeHtml)
}

/**
 * One node or several nodes.
 *
 * @param {unknown} value
 *   Thing to serialize.
 * @param {boolean} includeImageAlt
 *   Include image `alt`s.
 * @param {boolean} includeHtml
 *   Include HTML.
 * @returns {string}
 *   Serialized node.
 */
function one(value, includeImageAlt, includeHtml) {
  if (node(value)) {
    if ('value' in value) {
      return value.type === 'html' && !includeHtml ?
'' : value.value } if (includeImageAlt && 'alt' in value && value.alt) { return value.alt } if ('children' in value) { return all(value.children, includeImageAlt, includeHtml) } } if (Array.isArray(value)) { return all(value, includeImageAlt, includeHtml) } return '' } /** * Serialize a list of nodes. * * @param {Array} values * Thing to serialize. * @param {boolean} includeImageAlt * Include image `alt`s. * @param {boolean} includeHtml * Include HTML. * @returns {string} * Serialized nodes. */ function all(values, includeImageAlt, includeHtml) { /** @type {Array} */ const result = []; let index = -1; while (++index < values.length) { result[index] = one(values[index], includeImageAlt, includeHtml); } return result.join('') } /** * Check if `value` looks like a node. * * @param {unknown} value * Thing. * @returns {value is Nodes} * Whether `value` is a node. */ function node(value) { return Boolean(value && typeof value === 'object') } /** * Map of named character references. * * @type {Record} */ const characterEntities = { AElig: 'Æ', AMP: '&', Aacute: 'Á', Abreve: 'Ă', Acirc: 'Â', Acy: 'А', Afr: '𝔄', Agrave: 'À', Alpha: 'Α', Amacr: 'Ā', And: '⩓', Aogon: 'Ą', Aopf: '𝔸', ApplyFunction: '⁡', Aring: 'Å', Ascr: '𝒜', Assign: '≔', Atilde: 'Ã', Auml: 'Ä', Backslash: '∖', Barv: '⫧', Barwed: '⌆', Bcy: 'Б', Because: '∵', Bernoullis: 'ℬ', Beta: 'Β', Bfr: '𝔅', Bopf: '𝔹', Breve: '˘', Bscr: 'ℬ', Bumpeq: '≎', CHcy: 'Ч', COPY: '©', Cacute: 'Ć', Cap: '⋒', CapitalDifferentialD: 'ⅅ', Cayleys: 'ℭ', Ccaron: 'Č', Ccedil: 'Ç', Ccirc: 'Ĉ', Cconint: '∰', Cdot: 'Ċ', Cedilla: '¸', CenterDot: '·', Cfr: 'ℭ', Chi: 'Χ', CircleDot: '⊙', CircleMinus: '⊖', CirclePlus: '⊕', CircleTimes: '⊗', ClockwiseContourIntegral: '∲', CloseCurlyDoubleQuote: '”', CloseCurlyQuote: '’', Colon: '∷', Colone: '⩴', Congruent: '≡', Conint: '∯', ContourIntegral: '∮', Copf: 'ℂ', Coproduct: '∐', CounterClockwiseContourIntegral: '∳', Cross: '⨯', Cscr: '𝒞', Cup: '⋓', CupCap: '≍', DD: 'ⅅ', DDotrahd: '⤑', DJcy: 'Ђ', DScy: 'Ѕ', DZcy: 'Џ', Dagger: '‡', Darr: '↡', Dashv: '⫤', Dcaron: 'Ď', Dcy: 'Д', Del: '∇', Delta: 'Δ', Dfr: '𝔇', DiacriticalAcute: '´', DiacriticalDot: '˙', DiacriticalDoubleAcute: '˝', DiacriticalGrave: '`', DiacriticalTilde: '˜', Diamond: '⋄', DifferentialD: 'ⅆ', Dopf: '𝔻', Dot: '¨', DotDot: '⃜', DotEqual: '≐', DoubleContourIntegral: '∯', DoubleDot: '¨', DoubleDownArrow: '⇓', DoubleLeftArrow: '⇐', DoubleLeftRightArrow: '⇔', DoubleLeftTee: '⫤', DoubleLongLeftArrow: '⟸', DoubleLongLeftRightArrow: '⟺', DoubleLongRightArrow: '⟹', DoubleRightArrow: '⇒', DoubleRightTee: '⊨', DoubleUpArrow: '⇑', DoubleUpDownArrow: '⇕', DoubleVerticalBar: '∥', DownArrow: '↓', DownArrowBar: '⤓', DownArrowUpArrow: '⇵', DownBreve: '̑', DownLeftRightVector: '⥐', DownLeftTeeVector: '⥞', DownLeftVector: '↽', DownLeftVectorBar: '⥖', DownRightTeeVector: '⥟', DownRightVector: '⇁', DownRightVectorBar: '⥗', DownTee: '⊤', DownTeeArrow: '↧', Downarrow: '⇓', Dscr: '𝒟', Dstrok: 'Đ', ENG: 'Ŋ', ETH: 'Ð', Eacute: 'É', Ecaron: 'Ě', Ecirc: 'Ê', Ecy: 'Э', Edot: 'Ė', Efr: '𝔈', Egrave: 'È', Element: '∈', Emacr: 'Ē', EmptySmallSquare: '◻', EmptyVerySmallSquare: '▫', Eogon: 'Ę', Eopf: '𝔼', Epsilon: 'Ε', Equal: '⩵', EqualTilde: '≂', Equilibrium: '⇌', Escr: 'ℰ', Esim: '⩳', Eta: 'Η', Euml: 'Ë', Exists: '∃', ExponentialE: 'ⅇ', Fcy: 'Ф', Ffr: '𝔉', FilledSmallSquare: '◼', FilledVerySmallSquare: '▪', Fopf: '𝔽', ForAll: '∀', Fouriertrf: 'ℱ', Fscr: 'ℱ', GJcy: 'Ѓ', GT: '>', Gamma: 'Γ', Gammad: 'Ϝ', Gbreve: 'Ğ', Gcedil: 'Ģ', Gcirc: 'Ĝ', Gcy: 'Г', Gdot: 'Ġ', Gfr: '𝔊', Gg: '⋙', Gopf: '𝔾', 
GreaterEqual: '≥', GreaterEqualLess: '⋛', GreaterFullEqual: '≧', GreaterGreater: '⪢', GreaterLess: '≷', GreaterSlantEqual: '⩾', GreaterTilde: '≳', Gscr: '𝒢', Gt: '≫', HARDcy: 'Ъ', Hacek: 'ˇ', Hat: '^', Hcirc: 'Ĥ', Hfr: 'ℌ', HilbertSpace: 'ℋ', Hopf: 'ℍ', HorizontalLine: '─', Hscr: 'ℋ', Hstrok: 'Ħ', HumpDownHump: '≎', HumpEqual: '≏', IEcy: 'Е', IJlig: 'IJ', IOcy: 'Ё', Iacute: 'Í', Icirc: 'Î', Icy: 'И', Idot: 'İ', Ifr: 'ℑ', Igrave: 'Ì', Im: 'ℑ', Imacr: 'Ī', ImaginaryI: 'ⅈ', Implies: '⇒', Int: '∬', Integral: '∫', Intersection: '⋂', InvisibleComma: '⁣', InvisibleTimes: '⁢', Iogon: 'Į', Iopf: '𝕀', Iota: 'Ι', Iscr: 'ℐ', Itilde: 'Ĩ', Iukcy: 'І', Iuml: 'Ï', Jcirc: 'Ĵ', Jcy: 'Й', Jfr: '𝔍', Jopf: '𝕁', Jscr: '𝒥', Jsercy: 'Ј', Jukcy: 'Є', KHcy: 'Х', KJcy: 'Ќ', Kappa: 'Κ', Kcedil: 'Ķ', Kcy: 'К', Kfr: '𝔎', Kopf: '𝕂', Kscr: '𝒦', LJcy: 'Љ', LT: '<', Lacute: 'Ĺ', Lambda: 'Λ', Lang: '⟪', Laplacetrf: 'ℒ', Larr: '↞', Lcaron: 'Ľ', Lcedil: 'Ļ', Lcy: 'Л', LeftAngleBracket: '⟨', LeftArrow: '←', LeftArrowBar: '⇤', LeftArrowRightArrow: '⇆', LeftCeiling: '⌈', LeftDoubleBracket: '⟦', LeftDownTeeVector: '⥡', LeftDownVector: '⇃', LeftDownVectorBar: '⥙', LeftFloor: '⌊', LeftRightArrow: '↔', LeftRightVector: '⥎', LeftTee: '⊣', LeftTeeArrow: '↤', LeftTeeVector: '⥚', LeftTriangle: '⊲', LeftTriangleBar: '⧏', LeftTriangleEqual: '⊴', LeftUpDownVector: '⥑', LeftUpTeeVector: '⥠', LeftUpVector: '↿', LeftUpVectorBar: '⥘', LeftVector: '↼', LeftVectorBar: '⥒', Leftarrow: '⇐', Leftrightarrow: '⇔', LessEqualGreater: '⋚', LessFullEqual: '≦', LessGreater: '≶', LessLess: '⪡', LessSlantEqual: '⩽', LessTilde: '≲', Lfr: '𝔏', Ll: '⋘', Lleftarrow: '⇚', Lmidot: 'Ŀ', LongLeftArrow: '⟵', LongLeftRightArrow: '⟷', LongRightArrow: '⟶', Longleftarrow: '⟸', Longleftrightarrow: '⟺', Longrightarrow: '⟹', Lopf: '𝕃', LowerLeftArrow: '↙', LowerRightArrow: '↘', Lscr: 'ℒ', Lsh: '↰', Lstrok: 'Ł', Lt: '≪', Map: '⤅', Mcy: 'М', MediumSpace: ' ', Mellintrf: 'ℳ', Mfr: '𝔐', MinusPlus: '∓', Mopf: '𝕄', Mscr: 'ℳ', Mu: 'Μ', NJcy: 'Њ', Nacute: 'Ń', Ncaron: 'Ň', Ncedil: 'Ņ', Ncy: 'Н', NegativeMediumSpace: '​', NegativeThickSpace: '​', NegativeThinSpace: '​', NegativeVeryThinSpace: '​', NestedGreaterGreater: '≫', NestedLessLess: '≪', NewLine: '\n', Nfr: '𝔑', NoBreak: '⁠', NonBreakingSpace: ' ', Nopf: 'ℕ', Not: '⫬', NotCongruent: '≢', NotCupCap: '≭', NotDoubleVerticalBar: '∦', NotElement: '∉', NotEqual: '≠', NotEqualTilde: '≂̸', NotExists: '∄', NotGreater: '≯', NotGreaterEqual: '≱', NotGreaterFullEqual: '≧̸', NotGreaterGreater: '≫̸', NotGreaterLess: '≹', NotGreaterSlantEqual: '⩾̸', NotGreaterTilde: '≵', NotHumpDownHump: '≎̸', NotHumpEqual: '≏̸', NotLeftTriangle: '⋪', NotLeftTriangleBar: '⧏̸', NotLeftTriangleEqual: '⋬', NotLess: '≮', NotLessEqual: '≰', NotLessGreater: '≸', NotLessLess: '≪̸', NotLessSlantEqual: '⩽̸', NotLessTilde: '≴', NotNestedGreaterGreater: '⪢̸', NotNestedLessLess: '⪡̸', NotPrecedes: '⊀', NotPrecedesEqual: '⪯̸', NotPrecedesSlantEqual: '⋠', NotReverseElement: '∌', NotRightTriangle: '⋫', NotRightTriangleBar: '⧐̸', NotRightTriangleEqual: '⋭', NotSquareSubset: '⊏̸', NotSquareSubsetEqual: '⋢', NotSquareSuperset: '⊐̸', NotSquareSupersetEqual: '⋣', NotSubset: '⊂⃒', NotSubsetEqual: '⊈', NotSucceeds: '⊁', NotSucceedsEqual: '⪰̸', NotSucceedsSlantEqual: '⋡', NotSucceedsTilde: '≿̸', NotSuperset: '⊃⃒', NotSupersetEqual: '⊉', NotTilde: '≁', NotTildeEqual: '≄', NotTildeFullEqual: '≇', NotTildeTilde: '≉', NotVerticalBar: '∤', Nscr: '𝒩', Ntilde: 'Ñ', Nu: 'Ν', OElig: 'Œ', Oacute: 'Ó', Ocirc: 'Ô', Ocy: 'О', Odblac: 'Ő', Ofr: '𝔒', Ograve: 'Ò', Omacr: 'Ō', Omega: 'Ω', 
Omicron: 'Ο', Oopf: '𝕆', OpenCurlyDoubleQuote: '“', OpenCurlyQuote: '‘', Or: '⩔', Oscr: '𝒪', Oslash: 'Ø', Otilde: 'Õ', Otimes: '⨷', Ouml: 'Ö', OverBar: '‾', OverBrace: '⏞', OverBracket: '⎴', OverParenthesis: '⏜', PartialD: '∂', Pcy: 'П', Pfr: '𝔓', Phi: 'Φ', Pi: 'Π', PlusMinus: '±', Poincareplane: 'ℌ', Popf: 'ℙ', Pr: '⪻', Precedes: '≺', PrecedesEqual: '⪯', PrecedesSlantEqual: '≼', PrecedesTilde: '≾', Prime: '″', Product: '∏', Proportion: '∷', Proportional: '∝', Pscr: '𝒫', Psi: 'Ψ', QUOT: '"', Qfr: '𝔔', Qopf: 'ℚ', Qscr: '𝒬', RBarr: '⤐', REG: '®', Racute: 'Ŕ', Rang: '⟫', Rarr: '↠', Rarrtl: '⤖', Rcaron: 'Ř', Rcedil: 'Ŗ', Rcy: 'Р', Re: 'ℜ', ReverseElement: '∋', ReverseEquilibrium: '⇋', ReverseUpEquilibrium: '⥯', Rfr: 'ℜ', Rho: 'Ρ', RightAngleBracket: '⟩', RightArrow: '→', RightArrowBar: '⇥', RightArrowLeftArrow: '⇄', RightCeiling: '⌉', RightDoubleBracket: '⟧', RightDownTeeVector: '⥝', RightDownVector: '⇂', RightDownVectorBar: '⥕', RightFloor: '⌋', RightTee: '⊢', RightTeeArrow: '↦', RightTeeVector: '⥛', RightTriangle: '⊳', RightTriangleBar: '⧐', RightTriangleEqual: '⊵', RightUpDownVector: '⥏', RightUpTeeVector: '⥜', RightUpVector: '↾', RightUpVectorBar: '⥔', RightVector: '⇀', RightVectorBar: '⥓', Rightarrow: '⇒', Ropf: 'ℝ', RoundImplies: '⥰', Rrightarrow: '⇛', Rscr: 'ℛ', Rsh: '↱', RuleDelayed: '⧴', SHCHcy: 'Щ', SHcy: 'Ш', SOFTcy: 'Ь', Sacute: 'Ś', Sc: '⪼', Scaron: 'Š', Scedil: 'Ş', Scirc: 'Ŝ', Scy: 'С', Sfr: '𝔖', ShortDownArrow: '↓', ShortLeftArrow: '←', ShortRightArrow: '→', ShortUpArrow: '↑', Sigma: 'Σ', SmallCircle: '∘', Sopf: '𝕊', Sqrt: '√', Square: '□', SquareIntersection: '⊓', SquareSubset: '⊏', SquareSubsetEqual: '⊑', SquareSuperset: '⊐', SquareSupersetEqual: '⊒', SquareUnion: '⊔', Sscr: '𝒮', Star: '⋆', Sub: '⋐', Subset: '⋐', SubsetEqual: '⊆', Succeeds: '≻', SucceedsEqual: '⪰', SucceedsSlantEqual: '≽', SucceedsTilde: '≿', SuchThat: '∋', Sum: '∑', Sup: '⋑', Superset: '⊃', SupersetEqual: '⊇', Supset: '⋑', THORN: 'Þ', TRADE: '™', TSHcy: 'Ћ', TScy: 'Ц', Tab: '\t', Tau: 'Τ', Tcaron: 'Ť', Tcedil: 'Ţ', Tcy: 'Т', Tfr: '𝔗', Therefore: '∴', Theta: 'Θ', ThickSpace: '  ', ThinSpace: ' ', Tilde: '∼', TildeEqual: '≃', TildeFullEqual: '≅', TildeTilde: '≈', Topf: '𝕋', TripleDot: '⃛', Tscr: '𝒯', Tstrok: 'Ŧ', Uacute: 'Ú', Uarr: '↟', Uarrocir: '⥉', Ubrcy: 'Ў', Ubreve: 'Ŭ', Ucirc: 'Û', Ucy: 'У', Udblac: 'Ű', Ufr: '𝔘', Ugrave: 'Ù', Umacr: 'Ū', UnderBar: '_', UnderBrace: '⏟', UnderBracket: '⎵', UnderParenthesis: '⏝', Union: '⋃', UnionPlus: '⊎', Uogon: 'Ų', Uopf: '𝕌', UpArrow: '↑', UpArrowBar: '⤒', UpArrowDownArrow: '⇅', UpDownArrow: '↕', UpEquilibrium: '⥮', UpTee: '⊥', UpTeeArrow: '↥', Uparrow: '⇑', Updownarrow: '⇕', UpperLeftArrow: '↖', UpperRightArrow: '↗', Upsi: 'ϒ', Upsilon: 'Υ', Uring: 'Ů', Uscr: '𝒰', Utilde: 'Ũ', Uuml: 'Ü', VDash: '⊫', Vbar: '⫫', Vcy: 'В', Vdash: '⊩', Vdashl: '⫦', Vee: '⋁', Verbar: '‖', Vert: '‖', VerticalBar: '∣', VerticalLine: '|', VerticalSeparator: '❘', VerticalTilde: '≀', VeryThinSpace: ' ', Vfr: '𝔙', Vopf: '𝕍', Vscr: '𝒱', Vvdash: '⊪', Wcirc: 'Ŵ', Wedge: '⋀', Wfr: '𝔚', Wopf: '𝕎', Wscr: '𝒲', Xfr: '𝔛', Xi: 'Ξ', Xopf: '𝕏', Xscr: '𝒳', YAcy: 'Я', YIcy: 'Ї', YUcy: 'Ю', Yacute: 'Ý', Ycirc: 'Ŷ', Ycy: 'Ы', Yfr: '𝔜', Yopf: '𝕐', Yscr: '𝒴', Yuml: 'Ÿ', ZHcy: 'Ж', Zacute: 'Ź', Zcaron: 'Ž', Zcy: 'З', Zdot: 'Ż', ZeroWidthSpace: '​', Zeta: 'Ζ', Zfr: 'ℨ', Zopf: 'ℤ', Zscr: '𝒵', aacute: 'á', abreve: 'ă', ac: '∾', acE: '∾̳', acd: '∿', acirc: 'â', acute: '´', acy: 'а', aelig: 'æ', af: '⁡', afr: '𝔞', agrave: 'à', alefsym: 'ℵ', aleph: 'ℵ', alpha: 'α', amacr: 'ā', amalg: '⨿', amp: '&', and: '∧', andand: 
'⩕', andd: '⩜', andslope: '⩘', andv: '⩚', ang: '∠', ange: '⦤', angle: '∠', angmsd: '∡', angmsdaa: '⦨', angmsdab: '⦩', angmsdac: '⦪', angmsdad: '⦫', angmsdae: '⦬', angmsdaf: '⦭', angmsdag: '⦮', angmsdah: '⦯', angrt: '∟', angrtvb: '⊾', angrtvbd: '⦝', angsph: '∢', angst: 'Å', angzarr: '⍼', aogon: 'ą', aopf: '𝕒', ap: '≈', apE: '⩰', apacir: '⩯', ape: '≊', apid: '≋', apos: "'", approx: '≈', approxeq: '≊', aring: 'å', ascr: '𝒶', ast: '*', asymp: '≈', asympeq: '≍', atilde: 'ã', auml: 'ä', awconint: '∳', awint: '⨑', bNot: '⫭', backcong: '≌', backepsilon: '϶', backprime: '‵', backsim: '∽', backsimeq: '⋍', barvee: '⊽', barwed: '⌅', barwedge: '⌅', bbrk: '⎵', bbrktbrk: '⎶', bcong: '≌', bcy: 'б', bdquo: '„', becaus: '∵', because: '∵', bemptyv: '⦰', bepsi: '϶', bernou: 'ℬ', beta: 'β', beth: 'ℶ', between: '≬', bfr: '𝔟', bigcap: '⋂', bigcirc: '◯', bigcup: '⋃', bigodot: '⨀', bigoplus: '⨁', bigotimes: '⨂', bigsqcup: '⨆', bigstar: '★', bigtriangledown: '▽', bigtriangleup: '△', biguplus: '⨄', bigvee: '⋁', bigwedge: '⋀', bkarow: '⤍', blacklozenge: '⧫', blacksquare: '▪', blacktriangle: '▴', blacktriangledown: '▾', blacktriangleleft: '◂', blacktriangleright: '▸', blank: '␣', blk12: '▒', blk14: '░', blk34: '▓', block: '█', bne: '=⃥', bnequiv: '≡⃥', bnot: '⌐', bopf: '𝕓', bot: '⊥', bottom: '⊥', bowtie: '⋈', boxDL: '╗', boxDR: '╔', boxDl: '╖', boxDr: '╓', boxH: '═', boxHD: '╦', boxHU: '╩', boxHd: '╤', boxHu: '╧', boxUL: '╝', boxUR: '╚', boxUl: '╜', boxUr: '╙', boxV: '║', boxVH: '╬', boxVL: '╣', boxVR: '╠', boxVh: '╫', boxVl: '╢', boxVr: '╟', boxbox: '⧉', boxdL: '╕', boxdR: '╒', boxdl: '┐', boxdr: '┌', boxh: '─', boxhD: '╥', boxhU: '╨', boxhd: '┬', boxhu: '┴', boxminus: '⊟', boxplus: '⊞', boxtimes: '⊠', boxuL: '╛', boxuR: '╘', boxul: '┘', boxur: '└', boxv: '│', boxvH: '╪', boxvL: '╡', boxvR: '╞', boxvh: '┼', boxvl: '┤', boxvr: '├', bprime: '‵', breve: '˘', brvbar: '¦', bscr: '𝒷', bsemi: '⁏', bsim: '∽', bsime: '⋍', bsol: '\\', bsolb: '⧅', bsolhsub: '⟈', bull: '•', bullet: '•', bump: '≎', bumpE: '⪮', bumpe: '≏', bumpeq: '≏', cacute: 'ć', cap: '∩', capand: '⩄', capbrcup: '⩉', capcap: '⩋', capcup: '⩇', capdot: '⩀', caps: '∩︀', caret: '⁁', caron: 'ˇ', ccaps: '⩍', ccaron: 'č', ccedil: 'ç', ccirc: 'ĉ', ccups: '⩌', ccupssm: '⩐', cdot: 'ċ', cedil: '¸', cemptyv: '⦲', cent: '¢', centerdot: '·', cfr: '𝔠', chcy: 'ч', check: '✓', checkmark: '✓', chi: 'χ', cir: '○', cirE: '⧃', circ: 'ˆ', circeq: '≗', circlearrowleft: '↺', circlearrowright: '↻', circledR: '®', circledS: 'Ⓢ', circledast: '⊛', circledcirc: '⊚', circleddash: '⊝', cire: '≗', cirfnint: '⨐', cirmid: '⫯', cirscir: '⧂', clubs: '♣', clubsuit: '♣', colon: ':', colone: '≔', coloneq: '≔', comma: ',', commat: '@', comp: '∁', compfn: '∘', complement: '∁', complexes: 'ℂ', cong: '≅', congdot: '⩭', conint: '∮', copf: '𝕔', coprod: '∐', copy: '©', copysr: '℗', crarr: '↵', cross: '✗', cscr: '𝒸', csub: '⫏', csube: '⫑', csup: '⫐', csupe: '⫒', ctdot: '⋯', cudarrl: '⤸', cudarrr: '⤵', cuepr: '⋞', cuesc: '⋟', cularr: '↶', cularrp: '⤽', cup: '∪', cupbrcap: '⩈', cupcap: '⩆', cupcup: '⩊', cupdot: '⊍', cupor: '⩅', cups: '∪︀', curarr: '↷', curarrm: '⤼', curlyeqprec: '⋞', curlyeqsucc: '⋟', curlyvee: '⋎', curlywedge: '⋏', curren: '¤', curvearrowleft: '↶', curvearrowright: '↷', cuvee: '⋎', cuwed: '⋏', cwconint: '∲', cwint: '∱', cylcty: '⌭', dArr: '⇓', dHar: '⥥', dagger: '†', daleth: 'ℸ', darr: '↓', dash: '‐', dashv: '⊣', dbkarow: '⤏', dblac: '˝', dcaron: 'ď', dcy: 'д', dd: 'ⅆ', ddagger: '‡', ddarr: '⇊', ddotseq: '⩷', deg: '°', delta: 'δ', demptyv: '⦱', dfisht: '⥿', dfr: '𝔡', dharl: '⇃', dharr: '⇂', 
diam: '⋄', diamond: '⋄', diamondsuit: '♦', diams: '♦', die: '¨', digamma: 'ϝ', disin: '⋲', div: '÷', divide: '÷', divideontimes: '⋇', divonx: '⋇', djcy: 'ђ', dlcorn: '⌞', dlcrop: '⌍', dollar: '$', dopf: '𝕕', dot: '˙', doteq: '≐', doteqdot: '≑', dotminus: '∸', dotplus: '∔', dotsquare: '⊡', doublebarwedge: '⌆', downarrow: '↓', downdownarrows: '⇊', downharpoonleft: '⇃', downharpoonright: '⇂', drbkarow: '⤐', drcorn: '⌟', drcrop: '⌌', dscr: '𝒹', dscy: 'ѕ', dsol: '⧶', dstrok: 'đ', dtdot: '⋱', dtri: '▿', dtrif: '▾', duarr: '⇵', duhar: '⥯', dwangle: '⦦', dzcy: 'џ', dzigrarr: '⟿', eDDot: '⩷', eDot: '≑', eacute: 'é', easter: '⩮', ecaron: 'ě', ecir: '≖', ecirc: 'ê', ecolon: '≕', ecy: 'э', edot: 'ė', ee: 'ⅇ', efDot: '≒', efr: '𝔢', eg: '⪚', egrave: 'è', egs: '⪖', egsdot: '⪘', el: '⪙', elinters: '⏧', ell: 'ℓ', els: '⪕', elsdot: '⪗', emacr: 'ē', empty: '∅', emptyset: '∅', emptyv: '∅', emsp13: ' ', emsp14: ' ', emsp: ' ', eng: 'ŋ', ensp: ' ', eogon: 'ę', eopf: '𝕖', epar: '⋕', eparsl: '⧣', eplus: '⩱', epsi: 'ε', epsilon: 'ε', epsiv: 'ϵ', eqcirc: '≖', eqcolon: '≕', eqsim: '≂', eqslantgtr: '⪖', eqslantless: '⪕', equals: '=', equest: '≟', equiv: '≡', equivDD: '⩸', eqvparsl: '⧥', erDot: '≓', erarr: '⥱', escr: 'ℯ', esdot: '≐', esim: '≂', eta: 'η', eth: 'ð', euml: 'ë', euro: '€', excl: '!', exist: '∃', expectation: 'ℰ', exponentiale: 'ⅇ', fallingdotseq: '≒', fcy: 'ф', female: '♀', ffilig: 'ffi', fflig: 'ff', ffllig: 'ffl', ffr: '𝔣', filig: 'fi', fjlig: 'fj', flat: '♭', fllig: 'fl', fltns: '▱', fnof: 'ƒ', fopf: '𝕗', forall: '∀', fork: '⋔', forkv: '⫙', fpartint: '⨍', frac12: '½', frac13: '⅓', frac14: '¼', frac15: '⅕', frac16: '⅙', frac18: '⅛', frac23: '⅔', frac25: '⅖', frac34: '¾', frac35: '⅗', frac38: '⅜', frac45: '⅘', frac56: '⅚', frac58: '⅝', frac78: '⅞', frasl: '⁄', frown: '⌢', fscr: '𝒻', gE: '≧', gEl: '⪌', gacute: 'ǵ', gamma: 'γ', gammad: 'ϝ', gap: '⪆', gbreve: 'ğ', gcirc: 'ĝ', gcy: 'г', gdot: 'ġ', ge: '≥', gel: '⋛', geq: '≥', geqq: '≧', geqslant: '⩾', ges: '⩾', gescc: '⪩', gesdot: '⪀', gesdoto: '⪂', gesdotol: '⪄', gesl: '⋛︀', gesles: '⪔', gfr: '𝔤', gg: '≫', ggg: '⋙', gimel: 'ℷ', gjcy: 'ѓ', gl: '≷', glE: '⪒', gla: '⪥', glj: '⪤', gnE: '≩', gnap: '⪊', gnapprox: '⪊', gne: '⪈', gneq: '⪈', gneqq: '≩', gnsim: '⋧', gopf: '𝕘', grave: '`', gscr: 'ℊ', gsim: '≳', gsime: '⪎', gsiml: '⪐', gt: '>', gtcc: '⪧', gtcir: '⩺', gtdot: '⋗', gtlPar: '⦕', gtquest: '⩼', gtrapprox: '⪆', gtrarr: '⥸', gtrdot: '⋗', gtreqless: '⋛', gtreqqless: '⪌', gtrless: '≷', gtrsim: '≳', gvertneqq: '≩︀', gvnE: '≩︀', hArr: '⇔', hairsp: ' ', half: '½', hamilt: 'ℋ', hardcy: 'ъ', harr: '↔', harrcir: '⥈', harrw: '↭', hbar: 'ℏ', hcirc: 'ĥ', hearts: '♥', heartsuit: '♥', hellip: '…', hercon: '⊹', hfr: '𝔥', hksearow: '⤥', hkswarow: '⤦', hoarr: '⇿', homtht: '∻', hookleftarrow: '↩', hookrightarrow: '↪', hopf: '𝕙', horbar: '―', hscr: '𝒽', hslash: 'ℏ', hstrok: 'ħ', hybull: '⁃', hyphen: '‐', iacute: 'í', ic: '⁣', icirc: 'î', icy: 'и', iecy: 'е', iexcl: '¡', iff: '⇔', ifr: '𝔦', igrave: 'ì', ii: 'ⅈ', iiiint: '⨌', iiint: '∭', iinfin: '⧜', iiota: '℩', ijlig: 'ij', imacr: 'ī', image: 'ℑ', imagline: 'ℐ', imagpart: 'ℑ', imath: 'ı', imof: '⊷', imped: 'Ƶ', in: '∈', incare: '℅', infin: '∞', infintie: '⧝', inodot: 'ı', int: '∫', intcal: '⊺', integers: 'ℤ', intercal: '⊺', intlarhk: '⨗', intprod: '⨼', iocy: 'ё', iogon: 'į', iopf: '𝕚', iota: 'ι', iprod: '⨼', iquest: '¿', iscr: '𝒾', isin: '∈', isinE: '⋹', isindot: '⋵', isins: '⋴', isinsv: '⋳', isinv: '∈', it: '⁢', itilde: 'ĩ', iukcy: 'і', iuml: 'ï', jcirc: 'ĵ', jcy: 'й', jfr: '𝔧', jmath: 'ȷ', jopf: '𝕛', jscr: '𝒿', jsercy: 'ј', 
jukcy: 'є', kappa: 'κ', kappav: 'ϰ', kcedil: 'ķ', kcy: 'к', kfr: '𝔨', kgreen: 'ĸ', khcy: 'х', kjcy: 'ќ', kopf: '𝕜', kscr: '𝓀', lAarr: '⇚', lArr: '⇐', lAtail: '⤛', lBarr: '⤎', lE: '≦', lEg: '⪋', lHar: '⥢', lacute: 'ĺ', laemptyv: '⦴', lagran: 'ℒ', lambda: 'λ', lang: '⟨', langd: '⦑', langle: '⟨', lap: '⪅', laquo: '«', larr: '←', larrb: '⇤', larrbfs: '⤟', larrfs: '⤝', larrhk: '↩', larrlp: '↫', larrpl: '⤹', larrsim: '⥳', larrtl: '↢', lat: '⪫', latail: '⤙', late: '⪭', lates: '⪭︀', lbarr: '⤌', lbbrk: '❲', lbrace: '{', lbrack: '[', lbrke: '⦋', lbrksld: '⦏', lbrkslu: '⦍', lcaron: 'ľ', lcedil: 'ļ', lceil: '⌈', lcub: '{', lcy: 'л', ldca: '⤶', ldquo: '“', ldquor: '„', ldrdhar: '⥧', ldrushar: '⥋', ldsh: '↲', le: '≤', leftarrow: '←', leftarrowtail: '↢', leftharpoondown: '↽', leftharpoonup: '↼', leftleftarrows: '⇇', leftrightarrow: '↔', leftrightarrows: '⇆', leftrightharpoons: '⇋', leftrightsquigarrow: '↭', leftthreetimes: '⋋', leg: '⋚', leq: '≤', leqq: '≦', leqslant: '⩽', les: '⩽', lescc: '⪨', lesdot: '⩿', lesdoto: '⪁', lesdotor: '⪃', lesg: '⋚︀', lesges: '⪓', lessapprox: '⪅', lessdot: '⋖', lesseqgtr: '⋚', lesseqqgtr: '⪋', lessgtr: '≶', lesssim: '≲', lfisht: '⥼', lfloor: '⌊', lfr: '𝔩', lg: '≶', lgE: '⪑', lhard: '↽', lharu: '↼', lharul: '⥪', lhblk: '▄', ljcy: 'љ', ll: '≪', llarr: '⇇', llcorner: '⌞', llhard: '⥫', lltri: '◺', lmidot: 'ŀ', lmoust: '⎰', lmoustache: '⎰', lnE: '≨', lnap: '⪉', lnapprox: '⪉', lne: '⪇', lneq: '⪇', lneqq: '≨', lnsim: '⋦', loang: '⟬', loarr: '⇽', lobrk: '⟦', longleftarrow: '⟵', longleftrightarrow: '⟷', longmapsto: '⟼', longrightarrow: '⟶', looparrowleft: '↫', looparrowright: '↬', lopar: '⦅', lopf: '𝕝', loplus: '⨭', lotimes: '⨴', lowast: '∗', lowbar: '_', loz: '◊', lozenge: '◊', lozf: '⧫', lpar: '(', lparlt: '⦓', lrarr: '⇆', lrcorner: '⌟', lrhar: '⇋', lrhard: '⥭', lrm: '‎', lrtri: '⊿', lsaquo: '‹', lscr: '𝓁', lsh: '↰', lsim: '≲', lsime: '⪍', lsimg: '⪏', lsqb: '[', lsquo: '‘', lsquor: '‚', lstrok: 'ł', lt: '<', ltcc: '⪦', ltcir: '⩹', ltdot: '⋖', lthree: '⋋', ltimes: '⋉', ltlarr: '⥶', ltquest: '⩻', ltrPar: '⦖', ltri: '◃', ltrie: '⊴', ltrif: '◂', lurdshar: '⥊', luruhar: '⥦', lvertneqq: '≨︀', lvnE: '≨︀', mDDot: '∺', macr: '¯', male: '♂', malt: '✠', maltese: '✠', map: '↦', mapsto: '↦', mapstodown: '↧', mapstoleft: '↤', mapstoup: '↥', marker: '▮', mcomma: '⨩', mcy: 'м', mdash: '—', measuredangle: '∡', mfr: '𝔪', mho: '℧', micro: 'µ', mid: '∣', midast: '*', midcir: '⫰', middot: '·', minus: '−', minusb: '⊟', minusd: '∸', minusdu: '⨪', mlcp: '⫛', mldr: '…', mnplus: '∓', models: '⊧', mopf: '𝕞', mp: '∓', mscr: '𝓂', mstpos: '∾', mu: 'μ', multimap: '⊸', mumap: '⊸', nGg: '⋙̸', nGt: '≫⃒', nGtv: '≫̸', nLeftarrow: '⇍', nLeftrightarrow: '⇎', nLl: '⋘̸', nLt: '≪⃒', nLtv: '≪̸', nRightarrow: '⇏', nVDash: '⊯', nVdash: '⊮', nabla: '∇', nacute: 'ń', nang: '∠⃒', nap: '≉', napE: '⩰̸', napid: '≋̸', napos: 'ʼn', napprox: '≉', natur: '♮', natural: '♮', naturals: 'ℕ', nbsp: ' ', nbump: '≎̸', nbumpe: '≏̸', ncap: '⩃', ncaron: 'ň', ncedil: 'ņ', ncong: '≇', ncongdot: '⩭̸', ncup: '⩂', ncy: 'н', ndash: '–', ne: '≠', neArr: '⇗', nearhk: '⤤', nearr: '↗', nearrow: '↗', nedot: '≐̸', nequiv: '≢', nesear: '⤨', nesim: '≂̸', nexist: '∄', nexists: '∄', nfr: '𝔫', ngE: '≧̸', nge: '≱', ngeq: '≱', ngeqq: '≧̸', ngeqslant: '⩾̸', nges: '⩾̸', ngsim: '≵', ngt: '≯', ngtr: '≯', nhArr: '⇎', nharr: '↮', nhpar: '⫲', ni: '∋', nis: '⋼', nisd: '⋺', niv: '∋', njcy: 'њ', nlArr: '⇍', nlE: '≦̸', nlarr: '↚', nldr: '‥', nle: '≰', nleftarrow: '↚', nleftrightarrow: '↮', nleq: '≰', nleqq: '≦̸', nleqslant: '⩽̸', nles: '⩽̸', nless: '≮', nlsim: '≴', nlt: 
'≮', nltri: '⋪', nltrie: '⋬', nmid: '∤', nopf: '𝕟', not: '¬', notin: '∉', notinE: '⋹̸', notindot: '⋵̸', notinva: '∉', notinvb: '⋷', notinvc: '⋶', notni: '∌', notniva: '∌', notnivb: '⋾', notnivc: '⋽', npar: '∦', nparallel: '∦', nparsl: '⫽⃥', npart: '∂̸', npolint: '⨔', npr: '⊀', nprcue: '⋠', npre: '⪯̸', nprec: '⊀', npreceq: '⪯̸', nrArr: '⇏', nrarr: '↛', nrarrc: '⤳̸', nrarrw: '↝̸', nrightarrow: '↛', nrtri: '⋫', nrtrie: '⋭', nsc: '⊁', nsccue: '⋡', nsce: '⪰̸', nscr: '𝓃', nshortmid: '∤', nshortparallel: '∦', nsim: '≁', nsime: '≄', nsimeq: '≄', nsmid: '∤', nspar: '∦', nsqsube: '⋢', nsqsupe: '⋣', nsub: '⊄', nsubE: '⫅̸', nsube: '⊈', nsubset: '⊂⃒', nsubseteq: '⊈', nsubseteqq: '⫅̸', nsucc: '⊁', nsucceq: '⪰̸', nsup: '⊅', nsupE: '⫆̸', nsupe: '⊉', nsupset: '⊃⃒', nsupseteq: '⊉', nsupseteqq: '⫆̸', ntgl: '≹', ntilde: 'ñ', ntlg: '≸', ntriangleleft: '⋪', ntrianglelefteq: '⋬', ntriangleright: '⋫', ntrianglerighteq: '⋭', nu: 'ν', num: '#', numero: '№', numsp: ' ', nvDash: '⊭', nvHarr: '⤄', nvap: '≍⃒', nvdash: '⊬', nvge: '≥⃒', nvgt: '>⃒', nvinfin: '⧞', nvlArr: '⤂', nvle: '≤⃒', nvlt: '<⃒', nvltrie: '⊴⃒', nvrArr: '⤃', nvrtrie: '⊵⃒', nvsim: '∼⃒', nwArr: '⇖', nwarhk: '⤣', nwarr: '↖', nwarrow: '↖', nwnear: '⤧', oS: 'Ⓢ', oacute: 'ó', oast: '⊛', ocir: '⊚', ocirc: 'ô', ocy: 'о', odash: '⊝', odblac: 'ő', odiv: '⨸', odot: '⊙', odsold: '⦼', oelig: 'œ', ofcir: '⦿', ofr: '𝔬', ogon: '˛', ograve: 'ò', ogt: '⧁', ohbar: '⦵', ohm: 'Ω', oint: '∮', olarr: '↺', olcir: '⦾', olcross: '⦻', oline: '‾', olt: '⧀', omacr: 'ō', omega: 'ω', omicron: 'ο', omid: '⦶', ominus: '⊖', oopf: '𝕠', opar: '⦷', operp: '⦹', oplus: '⊕', or: '∨', orarr: '↻', ord: '⩝', order: 'ℴ', orderof: 'ℴ', ordf: 'ª', ordm: 'º', origof: '⊶', oror: '⩖', orslope: '⩗', orv: '⩛', oscr: 'ℴ', oslash: 'ø', osol: '⊘', otilde: 'õ', otimes: '⊗', otimesas: '⨶', ouml: 'ö', ovbar: '⌽', par: '∥', para: '¶', parallel: '∥', parsim: '⫳', parsl: '⫽', part: '∂', pcy: 'п', percnt: '%', period: '.', permil: '‰', perp: '⊥', pertenk: '‱', pfr: '𝔭', phi: 'φ', phiv: 'ϕ', phmmat: 'ℳ', phone: '☎', pi: 'π', pitchfork: '⋔', piv: 'ϖ', planck: 'ℏ', planckh: 'ℎ', plankv: 'ℏ', plus: '+', plusacir: '⨣', plusb: '⊞', pluscir: '⨢', plusdo: '∔', plusdu: '⨥', pluse: '⩲', plusmn: '±', plussim: '⨦', plustwo: '⨧', pm: '±', pointint: '⨕', popf: '𝕡', pound: '£', pr: '≺', prE: '⪳', prap: '⪷', prcue: '≼', pre: '⪯', prec: '≺', precapprox: '⪷', preccurlyeq: '≼', preceq: '⪯', precnapprox: '⪹', precneqq: '⪵', precnsim: '⋨', precsim: '≾', prime: '′', primes: 'ℙ', prnE: '⪵', prnap: '⪹', prnsim: '⋨', prod: '∏', profalar: '⌮', profline: '⌒', profsurf: '⌓', prop: '∝', propto: '∝', prsim: '≾', prurel: '⊰', pscr: '𝓅', psi: 'ψ', puncsp: ' ', qfr: '𝔮', qint: '⨌', qopf: '𝕢', qprime: '⁗', qscr: '𝓆', quaternions: 'ℍ', quatint: '⨖', quest: '?', questeq: '≟', quot: '"', rAarr: '⇛', rArr: '⇒', rAtail: '⤜', rBarr: '⤏', rHar: '⥤', race: '∽̱', racute: 'ŕ', radic: '√', raemptyv: '⦳', rang: '⟩', rangd: '⦒', range: '⦥', rangle: '⟩', raquo: '»', rarr: '→', rarrap: '⥵', rarrb: '⇥', rarrbfs: '⤠', rarrc: '⤳', rarrfs: '⤞', rarrhk: '↪', rarrlp: '↬', rarrpl: '⥅', rarrsim: '⥴', rarrtl: '↣', rarrw: '↝', ratail: '⤚', ratio: '∶', rationals: 'ℚ', rbarr: '⤍', rbbrk: '❳', rbrace: '}', rbrack: ']', rbrke: '⦌', rbrksld: '⦎', rbrkslu: '⦐', rcaron: 'ř', rcedil: 'ŗ', rceil: '⌉', rcub: '}', rcy: 'р', rdca: '⤷', rdldhar: '⥩', rdquo: '”', rdquor: '”', rdsh: '↳', real: 'ℜ', realine: 'ℛ', realpart: 'ℜ', reals: 'ℝ', rect: '▭', reg: '®', rfisht: '⥽', rfloor: '⌋', rfr: '𝔯', rhard: '⇁', rharu: '⇀', rharul: '⥬', rho: 'ρ', rhov: 'ϱ', rightarrow: '→', rightarrowtail: 
'↣', rightharpoondown: '⇁', rightharpoonup: '⇀', rightleftarrows: '⇄', rightleftharpoons: '⇌', rightrightarrows: '⇉', rightsquigarrow: '↝', rightthreetimes: '⋌', ring: '˚', risingdotseq: '≓', rlarr: '⇄', rlhar: '⇌', rlm: '‏', rmoust: '⎱', rmoustache: '⎱', rnmid: '⫮', roang: '⟭', roarr: '⇾', robrk: '⟧', ropar: '⦆', ropf: '𝕣', roplus: '⨮', rotimes: '⨵', rpar: ')', rpargt: '⦔', rppolint: '⨒', rrarr: '⇉', rsaquo: '›', rscr: '𝓇', rsh: '↱', rsqb: ']', rsquo: '’', rsquor: '’', rthree: '⋌', rtimes: '⋊', rtri: '▹', rtrie: '⊵', rtrif: '▸', rtriltri: '⧎', ruluhar: '⥨', rx: '℞', sacute: 'ś', sbquo: '‚', sc: '≻', scE: '⪴', scap: '⪸', scaron: 'š', sccue: '≽', sce: '⪰', scedil: 'ş', scirc: 'ŝ', scnE: '⪶', scnap: '⪺', scnsim: '⋩', scpolint: '⨓', scsim: '≿', scy: 'с', sdot: '⋅', sdotb: '⊡', sdote: '⩦', seArr: '⇘', searhk: '⤥', searr: '↘', searrow: '↘', sect: '§', semi: ';', seswar: '⤩', setminus: '∖', setmn: '∖', sext: '✶', sfr: '𝔰', sfrown: '⌢', sharp: '♯', shchcy: 'щ', shcy: 'ш', shortmid: '∣', shortparallel: '∥', shy: '­', sigma: 'σ', sigmaf: 'ς', sigmav: 'ς', sim: '∼', simdot: '⩪', sime: '≃', simeq: '≃', simg: '⪞', simgE: '⪠', siml: '⪝', simlE: '⪟', simne: '≆', simplus: '⨤', simrarr: '⥲', slarr: '←', smallsetminus: '∖', smashp: '⨳', smeparsl: '⧤', smid: '∣', smile: '⌣', smt: '⪪', smte: '⪬', smtes: '⪬︀', softcy: 'ь', sol: '/', solb: '⧄', solbar: '⌿', sopf: '𝕤', spades: '♠', spadesuit: '♠', spar: '∥', sqcap: '⊓', sqcaps: '⊓︀', sqcup: '⊔', sqcups: '⊔︀', sqsub: '⊏', sqsube: '⊑', sqsubset: '⊏', sqsubseteq: '⊑', sqsup: '⊐', sqsupe: '⊒', sqsupset: '⊐', sqsupseteq: '⊒', squ: '□', square: '□', squarf: '▪', squf: '▪', srarr: '→', sscr: '𝓈', ssetmn: '∖', ssmile: '⌣', sstarf: '⋆', star: '☆', starf: '★', straightepsilon: 'ϵ', straightphi: 'ϕ', strns: '¯', sub: '⊂', subE: '⫅', subdot: '⪽', sube: '⊆', subedot: '⫃', submult: '⫁', subnE: '⫋', subne: '⊊', subplus: '⪿', subrarr: '⥹', subset: '⊂', subseteq: '⊆', subseteqq: '⫅', subsetneq: '⊊', subsetneqq: '⫋', subsim: '⫇', subsub: '⫕', subsup: '⫓', succ: '≻', succapprox: '⪸', succcurlyeq: '≽', succeq: '⪰', succnapprox: '⪺', succneqq: '⪶', succnsim: '⋩', succsim: '≿', sum: '∑', sung: '♪', sup1: '¹', sup2: '²', sup3: '³', sup: '⊃', supE: '⫆', supdot: '⪾', supdsub: '⫘', supe: '⊇', supedot: '⫄', suphsol: '⟉', suphsub: '⫗', suplarr: '⥻', supmult: '⫂', supnE: '⫌', supne: '⊋', supplus: '⫀', supset: '⊃', supseteq: '⊇', supseteqq: '⫆', supsetneq: '⊋', supsetneqq: '⫌', supsim: '⫈', supsub: '⫔', supsup: '⫖', swArr: '⇙', swarhk: '⤦', swarr: '↙', swarrow: '↙', swnwar: '⤪', szlig: 'ß', target: '⌖', tau: 'τ', tbrk: '⎴', tcaron: 'ť', tcedil: 'ţ', tcy: 'т', tdot: '⃛', telrec: '⌕', tfr: '𝔱', there4: '∴', therefore: '∴', theta: 'θ', thetasym: 'ϑ', thetav: 'ϑ', thickapprox: '≈', thicksim: '∼', thinsp: ' ', thkap: '≈', thksim: '∼', thorn: 'þ', tilde: '˜', times: '×', timesb: '⊠', timesbar: '⨱', timesd: '⨰', tint: '∭', toea: '⤨', top: '⊤', topbot: '⌶', topcir: '⫱', topf: '𝕥', topfork: '⫚', tosa: '⤩', tprime: '‴', trade: '™', triangle: '▵', triangledown: '▿', triangleleft: '◃', trianglelefteq: '⊴', triangleq: '≜', triangleright: '▹', trianglerighteq: '⊵', tridot: '◬', trie: '≜', triminus: '⨺', triplus: '⨹', trisb: '⧍', tritime: '⨻', trpezium: '⏢', tscr: '𝓉', tscy: 'ц', tshcy: 'ћ', tstrok: 'ŧ', twixt: '≬', twoheadleftarrow: '↞', twoheadrightarrow: '↠', uArr: '⇑', uHar: '⥣', uacute: 'ú', uarr: '↑', ubrcy: 'ў', ubreve: 'ŭ', ucirc: 'û', ucy: 'у', udarr: '⇅', udblac: 'ű', udhar: '⥮', ufisht: '⥾', ufr: '𝔲', ugrave: 'ù', uharl: '↿', uharr: '↾', uhblk: '▀', ulcorn: '⌜', ulcorner: '⌜', ulcrop: '⌏', 
ultri: '◸', umacr: 'ū', uml: '¨', uogon: 'ų', uopf: '𝕦', uparrow: '↑', updownarrow: '↕', upharpoonleft: '↿', upharpoonright: '↾', uplus: '⊎', upsi: 'υ', upsih: 'ϒ', upsilon: 'υ', upuparrows: '⇈', urcorn: '⌝', urcorner: '⌝', urcrop: '⌎', uring: 'ů', urtri: '◹', uscr: '𝓊', utdot: '⋰', utilde: 'ũ', utri: '▵', utrif: '▴', uuarr: '⇈', uuml: 'ü', uwangle: '⦧', vArr: '⇕', vBar: '⫨', vBarv: '⫩', vDash: '⊨', vangrt: '⦜', varepsilon: 'ϵ', varkappa: 'ϰ', varnothing: '∅', varphi: 'ϕ', varpi: 'ϖ', varpropto: '∝', varr: '↕', varrho: 'ϱ', varsigma: 'ς', varsubsetneq: '⊊︀', varsubsetneqq: '⫋︀', varsupsetneq: '⊋︀', varsupsetneqq: '⫌︀', vartheta: 'ϑ', vartriangleleft: '⊲', vartriangleright: '⊳', vcy: 'в', vdash: '⊢', vee: '∨', veebar: '⊻', veeeq: '≚', vellip: '⋮', verbar: '|', vert: '|', vfr: '𝔳', vltri: '⊲', vnsub: '⊂⃒', vnsup: '⊃⃒', vopf: '𝕧', vprop: '∝', vrtri: '⊳', vscr: '𝓋', vsubnE: '⫋︀', vsubne: '⊊︀', vsupnE: '⫌︀', vsupne: '⊋︀', vzigzag: '⦚', wcirc: 'ŵ', wedbar: '⩟', wedge: '∧', wedgeq: '≙', weierp: '℘', wfr: '𝔴', wopf: '𝕨', wp: '℘', wr: '≀', wreath: '≀', wscr: '𝓌', xcap: '⋂', xcirc: '◯', xcup: '⋃', xdtri: '▽', xfr: '𝔵', xhArr: '⟺', xharr: '⟷', xi: 'ξ', xlArr: '⟸', xlarr: '⟵', xmap: '⟼', xnis: '⋻', xodot: '⨀', xopf: '𝕩', xoplus: '⨁', xotime: '⨂', xrArr: '⟹', xrarr: '⟶', xscr: '𝓍', xsqcup: '⨆', xuplus: '⨄', xutri: '△', xvee: '⋁', xwedge: '⋀', yacute: 'ý', yacy: 'я', ycirc: 'ŷ', ycy: 'ы', yen: '¥', yfr: '𝔶', yicy: 'ї', yopf: '𝕪', yscr: '𝓎', yucy: 'ю', yuml: 'ÿ', zacute: 'ź', zcaron: 'ž', zcy: 'з', zdot: 'ż', zeetrf: 'ℨ', zeta: 'ζ', zfr: '𝔷', zhcy: 'ж', zigrarr: '⇝', zopf: '𝕫', zscr: '𝓏', zwj: '‍', zwnj: '‌' }; const own$4 = {}.hasOwnProperty; /** * Decode a single character reference (without the `&` or `;`). * You probably only need this when you’re building parsers yourself that follow * different rules compared to HTML. * This is optimized to be tiny in browsers. * * @param {string} value * `notin` (named), `#123` (deci), `#x123` (hexa). * @returns {string|false} * Decoded reference. */ function decodeNamedCharacterReference(value) { return own$4.call(characterEntities, value) ? characterEntities[value] : false } /** * Like `Array#splice`, but smarter for giant arrays. * * `Array#splice` takes all items to be inserted as individual argument which * causes a stack overflow in V8 when trying to insert 100k items for instance. * * Otherwise, this does not return the removed items, and takes `items` as an * array instead of rest parameters. * * @template {unknown} T * Item type. * @param {Array} list * List to operate on. * @param {number} start * Index to remove/insert at (can be negative). * @param {number} remove * Number of items to remove. * @param {Array} items * Items to inject into `list`. * @returns {undefined} * Nothing. */ function splice$1(list, start, remove, items) { const end = list.length; let chunkStart = 0; /** @type {Array} */ let parameters; // Make start between zero and `end` (included). if (start < 0) { start = -start > end ? 0 : end + start; } else { start = start > end ? end : start; } remove = remove > 0 ? remove : 0; // No need to chunk the items if there’s only a couple (10k) items. if (items.length < 10000) { parameters = Array.from(items); parameters.unshift(start, remove); // @ts-expect-error Hush, it’s fine. list.splice(...parameters); } else { // Delete `remove` items starting from `start` if (remove) list.splice(start, remove); // Insert the items in chunks to not cause stack overflows. 
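    // Illustrative sketch (added comment, not part of the original code):
    //
    //   const list = [0];
    //   splice$1(list, 1, 0, Array.from({length: 25000}, (_, i) => i));
    //
    // issues three `splice` calls of at most 10k items each, rather than
    // spreading all 25k items as arguments in a single call, the pattern
    // that, as noted in the JSDoc above, can overflow the stack in V8 once
    // the input reaches roughly 100k items.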
while (chunkStart < items.length) { parameters = items.slice(chunkStart, chunkStart + 10000); parameters.unshift(start, 0); // @ts-expect-error Hush, it’s fine. list.splice(...parameters); chunkStart += 10000; start += 10000; } } } /** * Append `items` (an array) at the end of `list` (another array). * When `list` was empty, returns `items` instead. * * This prevents a potentially expensive operation when `list` is empty, * and adds items in batches to prevent V8 from hanging. * * @template {unknown} T * Item type. * @param {Array} list * List to operate on. * @param {Array} items * Items to add to `list`. * @returns {Array} * Either `list` or `items`. */ function push(list, items) { if (list.length > 0) { splice$1(list, list.length, 0, items); return list } return items } /** * @typedef {import('micromark-util-types').Extension} Extension * @typedef {import('micromark-util-types').Handles} Handles * @typedef {import('micromark-util-types').HtmlExtension} HtmlExtension * @typedef {import('micromark-util-types').NormalizedExtension} NormalizedExtension */ const hasOwnProperty$a = {}.hasOwnProperty; /** * Combine multiple syntax extensions into one. * * @param {Array} extensions * List of syntax extensions. * @returns {NormalizedExtension} * A single combined extension. */ function combineExtensions(extensions) { /** @type {NormalizedExtension} */ const all = {}; let index = -1; while (++index < extensions.length) { syntaxExtension(all, extensions[index]); } return all } /** * Merge `extension` into `all`. * * @param {NormalizedExtension} all * Extension to merge into. * @param {Extension} extension * Extension to merge. * @returns {undefined} */ function syntaxExtension(all, extension) { /** @type {keyof Extension} */ let hook; for (hook in extension) { const maybe = hasOwnProperty$a.call(all, hook) ? all[hook] : undefined; /** @type {Record} */ const left = maybe || (all[hook] = {}); /** @type {Record | undefined} */ const right = extension[hook]; /** @type {string} */ let code; if (right) { for (code in right) { if (!hasOwnProperty$a.call(left, code)) left[code] = []; const value = right[code]; constructs( // @ts-expect-error Looks like a list. left[code], Array.isArray(value) ? value : value ? [value] : [] ); } } } } /** * Merge `list` into `existing` (both lists of constructs). * Mutates `existing`. * * @param {Array} existing * @param {Array} list * @returns {undefined} */ function constructs(existing, list) { let index = -1; /** @type {Array} */ const before = []; while (++index < list.length) { (list[index].add === 'after' ? existing : before).push(list[index]); } splice$1(existing, 0, 0, before); } /** * Turn the number (in string form as either hexa- or plain decimal) coming from * a numeric character reference into a character. * * Sort of like `String.fromCodePoint(Number.parseInt(value, base))`, but makes * non-characters and control characters safe. * * @param {string} value * Value to decode. * @param {number} base * Numeric base. * @returns {string} * Character. */ function decodeNumericCharacterReference(value, base) { const code = Number.parseInt(value, base); if ( // C0 except for HT, LF, FF, CR, space. code < 9 || code === 11 || code > 13 && code < 32 || // Control character (DEL) of C0, and C1 controls. code > 126 && code < 160 || // Lone high surrogates and low surrogates. code > 55_295 && code < 57_344 || // Noncharacters. 
code > 64_975 && code < 65_008 || /* eslint-disable no-bitwise */ (code & 65_535) === 65_535 || (code & 65_535) === 65_534 || /* eslint-enable no-bitwise */ // Out of range code > 1_114_111) { return "\uFFFD"; } return String.fromCodePoint(code); } /** * Normalize an identifier (as found in references, definitions). * * Collapses markdown whitespace, trim, and then lower- and uppercase. * * Some characters are considered “uppercase”, such as U+03F4 (`ϴ`), but if their * lowercase counterpart (U+03B8 (`θ`)) is uppercased will result in a different * uppercase character (U+0398 (`Θ`)). * So, to get a canonical form, we perform both lower- and uppercase. * * Using uppercase last makes sure keys will never interact with default * prototypal values (such as `constructor`): nothing in the prototype of * `Object` is uppercase. * * @param {string} value * Identifier to normalize. * @returns {string} * Normalized identifier. */ function normalizeIdentifier(value) { return ( value // Collapse markdown whitespace. .replace(/[\t\n\r ]+/g, ' ') // Trim. .replace(/^ | $/g, '') // Some characters are considered “uppercase”, but if their lowercase // counterpart is uppercased will result in a different uppercase // character. // Hence, to get that form, we perform both lower- and uppercase. // Upper case makes sure keys will not interact with default prototypal // methods: no method is uppercase. .toLowerCase() .toUpperCase() ) } /** * @typedef {import('micromark-util-types').Code} Code */ /** * Check whether the character code represents an ASCII alpha (`a` through `z`, * case insensitive). * * An **ASCII alpha** is an ASCII upper alpha or ASCII lower alpha. * * An **ASCII upper alpha** is a character in the inclusive range U+0041 (`A`) * to U+005A (`Z`). * * An **ASCII lower alpha** is a character in the inclusive range U+0061 (`a`) * to U+007A (`z`). * * @param code * Code. * @returns {boolean} * Whether it matches. */ const asciiAlpha = regexCheck(/[A-Za-z]/); /** * Check whether the character code represents an ASCII alphanumeric (`a` * through `z`, case insensitive, or `0` through `9`). * * An **ASCII alphanumeric** is an ASCII digit (see `asciiDigit`) or ASCII alpha * (see `asciiAlpha`). * * @param code * Code. * @returns {boolean} * Whether it matches. */ const asciiAlphanumeric = regexCheck(/[\dA-Za-z]/); /** * Check whether the character code represents an ASCII atext. * * atext is an ASCII alphanumeric (see `asciiAlphanumeric`), or a character in * the inclusive ranges U+0023 NUMBER SIGN (`#`) to U+0027 APOSTROPHE (`'`), * U+002A ASTERISK (`*`), U+002B PLUS SIGN (`+`), U+002D DASH (`-`), U+002F * SLASH (`/`), U+003D EQUALS TO (`=`), U+003F QUESTION MARK (`?`), U+005E * CARET (`^`) to U+0060 GRAVE ACCENT (`` ` ``), or U+007B LEFT CURLY BRACE * (`{`) to U+007E TILDE (`~`). * * See: * **\[RFC5322]**: * [Internet Message Format](https://tools.ietf.org/html/rfc5322). * P. Resnick. * IETF. * * @param code * Code. * @returns {boolean} * Whether it matches. */ const asciiAtext = regexCheck(/[#-'*+\--9=?A-Z^-~]/); /** * Check whether a character code is an ASCII control character. * * An **ASCII control** is a character in the inclusive range U+0000 NULL (NUL) * to U+001F (US), or U+007F (DEL). * * @param {Code} code * Code. * @returns {boolean} * Whether it matches. 
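 *
 * Rough examples (added as an illustration): `asciiControl(8)` and
 * `asciiControl(127)` are `true`, `asciiControl(32)` (a space) is `false`,
 * and the negative “virtual” codes micromark uses for whitespace also count
 * as control here.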
*/ function asciiControl(code) { return ( // Special whitespace codes (which have negative values), C0 and Control // character DEL code !== null && (code < 32 || code === 127) ); } /** * Check whether the character code represents an ASCII digit (`0` through `9`). * * An **ASCII digit** is a character in the inclusive range U+0030 (`0`) to * U+0039 (`9`). * * @param code * Code. * @returns {boolean} * Whether it matches. */ const asciiDigit = regexCheck(/\d/); /** * Check whether the character code represents an ASCII hex digit (`a` through * `f`, case insensitive, or `0` through `9`). * * An **ASCII hex digit** is an ASCII digit (see `asciiDigit`), ASCII upper hex * digit, or an ASCII lower hex digit. * * An **ASCII upper hex digit** is a character in the inclusive range U+0041 * (`A`) to U+0046 (`F`). * * An **ASCII lower hex digit** is a character in the inclusive range U+0061 * (`a`) to U+0066 (`f`). * * @param code * Code. * @returns {boolean} * Whether it matches. */ const asciiHexDigit = regexCheck(/[\dA-Fa-f]/); /** * Check whether the character code represents ASCII punctuation. * * An **ASCII punctuation** is a character in the inclusive ranges U+0021 * EXCLAMATION MARK (`!`) to U+002F SLASH (`/`), U+003A COLON (`:`) to U+0040 AT * SIGN (`@`), U+005B LEFT SQUARE BRACKET (`[`) to U+0060 GRAVE ACCENT * (`` ` ``), or U+007B LEFT CURLY BRACE (`{`) to U+007E TILDE (`~`). * * @param code * Code. * @returns {boolean} * Whether it matches. */ const asciiPunctuation = regexCheck(/[!-/:-@[-`{-~]/); /** * Check whether a character code is a markdown line ending. * * A **markdown line ending** is the virtual characters M-0003 CARRIAGE RETURN * LINE FEED (CRLF), M-0004 LINE FEED (LF) and M-0005 CARRIAGE RETURN (CR). * * In micromark, the actual character U+000A LINE FEED (LF) and U+000D CARRIAGE * RETURN (CR) are replaced by these virtual characters depending on whether * they occurred together. * * @param {Code} code * Code. * @returns {boolean} * Whether it matches. */ function markdownLineEnding(code) { return code !== null && code < -2; } /** * Check whether a character code is a markdown line ending (see * `markdownLineEnding`) or markdown space (see `markdownSpace`). * * @param {Code} code * Code. * @returns {boolean} * Whether it matches. */ function markdownLineEndingOrSpace(code) { return code !== null && (code < 0 || code === 32); } /** * Check whether a character code is a markdown space. * * A **markdown space** is the concrete character U+0020 SPACE (SP) and the * virtual characters M-0001 VIRTUAL SPACE (VS) and M-0002 HORIZONTAL TAB (HT). * * In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is * replaced by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL * SPACE (VS) characters, depending on the column at which the tab occurred. * * @param {Code} code * Code. * @returns {boolean} * Whether it matches. */ function markdownSpace(code) { return code === -2 || code === -1 || code === 32; } // Size note: removing ASCII from the regex and using `asciiPunctuation` here // In fact adds to the bundle size. /** * Check whether the character code represents Unicode punctuation. * * A **Unicode punctuation** is a character in the Unicode `Pc` (Punctuation, * Connector), `Pd` (Punctuation, Dash), `Pe` (Punctuation, Close), `Pf` * (Punctuation, Final quote), `Pi` (Punctuation, Initial quote), `Po` * (Punctuation, Other), or `Ps` (Punctuation, Open) categories, or an ASCII * punctuation (see `asciiPunctuation`). 
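 *
 * As a rough added illustration: this matches characters such as `*`, `“`,
 * and `、`, and, because the pattern also includes `\p{S}`, symbols such as
 * `$` and `+` as well, but not letters or digits.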
* * See: * **\[UNICODE]**: * [The Unicode Standard](https://www.unicode.org/versions/). * Unicode Consortium. * * @param code * Code. * @returns * Whether it matches. */ const unicodePunctuation = regexCheck(/\p{P}|\p{S}/u); /** * Check whether the character code represents Unicode whitespace. * * Note that this does handle micromark specific markdown whitespace characters. * See `markdownLineEndingOrSpace` to check that. * * A **Unicode whitespace** is a character in the Unicode `Zs` (Separator, * Space) category, or U+0009 CHARACTER TABULATION (HT), U+000A LINE FEED (LF), * U+000C (FF), or U+000D CARRIAGE RETURN (CR) (**\[UNICODE]**). * * See: * **\[UNICODE]**: * [The Unicode Standard](https://www.unicode.org/versions/). * Unicode Consortium. * * @param code * Code. * @returns * Whether it matches. */ const unicodeWhitespace = regexCheck(/\s/); /** * Create a code check from a regex. * * @param {RegExp} regex * @returns {(code: Code) => boolean} */ function regexCheck(regex) { return check; /** * Check whether a code matches the bound regex. * * @param {Code} code * Character code. * @returns {boolean} * Whether the character code matches the bound regex. */ function check(code) { return code !== null && code > -1 && regex.test(String.fromCharCode(code)); } } /** * @typedef {import('micromark-util-types').Effects} Effects * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenType} TokenType */ // To do: implement `spaceOrTab`, `spaceOrTabMinMax`, `spaceOrTabWithOptions`. /** * Parse spaces and tabs. * * There is no `nok` parameter: * * * spaces in markdown are often optional, in which case this factory can be * used and `ok` will be switched to whether spaces were found or not * * one line ending or space can be detected with `markdownSpace(code)` right * before using `factorySpace` * * ###### Examples * * Where `␉` represents a tab (plus how much it expands) and `␠` represents a * single space. * * ```markdown * ␉ * ␠␠␠␠ * ␉␠ * ``` * * @param {Effects} effects * Context. * @param {State} ok * State switched to when successful. * @param {TokenType} type * Type (`' \t'`). * @param {number | undefined} [max=Infinity] * Max (exclusive). * @returns {State} * Start state. */ function factorySpace(effects, ok, type, max) { const limit = max ? 
max - 1 : Number.POSITIVE_INFINITY; let size = 0; return start /** @type {State} */ function start(code) { if (markdownSpace(code)) { effects.enter(type); return prefix(code) } return ok(code) } /** @type {State} */ function prefix(code) { if (markdownSpace(code) && size++ < limit) { effects.consume(code); return prefix } effects.exit(type); return ok(code) } } /** * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct * @typedef {import('micromark-util-types').Initializer} Initializer * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext */ /** @type {InitialConstruct} */ const content$1 = { tokenize: initializeContent }; /** * @this {TokenizeContext} * @type {Initializer} */ function initializeContent(effects) { const contentStart = effects.attempt( this.parser.constructs.contentInitial, afterContentStartConstruct, paragraphInitial ); /** @type {Token} */ let previous; return contentStart /** @type {State} */ function afterContentStartConstruct(code) { if (code === null) { effects.consume(code); return } effects.enter('lineEnding'); effects.consume(code); effects.exit('lineEnding'); return factorySpace(effects, contentStart, 'linePrefix') } /** @type {State} */ function paragraphInitial(code) { effects.enter('paragraph'); return lineStart(code) } /** @type {State} */ function lineStart(code) { const token = effects.enter('chunkText', { contentType: 'text', previous }); if (previous) { previous.next = token; } previous = token; return data(code) } /** @type {State} */ function data(code) { if (code === null) { effects.exit('chunkText'); effects.exit('paragraph'); effects.consume(code); return } if (markdownLineEnding(code)) { effects.consume(code); effects.exit('chunkText'); return lineStart } // Data. effects.consume(code); return data } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').ContainerState} ContainerState * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct * @typedef {import('micromark-util-types').Initializer} Initializer * @typedef {import('micromark-util-types').Point} Point * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {InitialConstruct} */ const document$2 = { tokenize: initializeDocument }; /** @type {Construct} */ const containerConstruct = { tokenize: tokenizeContainer }; /** * @this {TokenizeContext} * @type {Initializer} */ function initializeDocument(effects) { const self = this; /** @type {Array} */ const stack = []; let continued = 0; /** @type {TokenizeContext | undefined} */ let childFlow; /** @type {Token | undefined} */ let childToken; /** @type {number} */ let lineStartOffset; return start /** @type {State} */ function start(code) { // First we iterate through the open blocks, starting with the root // document, and descending through last children down to the last open // block. // Each block imposes a condition that the line must satisfy if the block is // to remain open. // For example, a block quote requires a `>` character. // A paragraph requires a non-blank line. // In this phase we may match all or just some of the open blocks. 
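    // (Added illustration: with a list item open inside a block quote, a
    // line such as `>   still in the item` can match both continuations,
    // while `> out` matches only the block quote, so only some of the open
    // blocks continue.)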
// But we cannot close unmatched blocks yet, because we may have a lazy // continuation line. if (continued < stack.length) { const item = stack[continued]; self.containerState = item[1]; return effects.attempt( item[0].continuation, documentContinue, checkNewContainers )(code) } // Done. return checkNewContainers(code) } /** @type {State} */ function documentContinue(code) { continued++; // Note: this field is called `_closeFlow` but it also closes containers. // Perhaps a good idea to rename it but it’s already used in the wild by // extensions. if (self.containerState._closeFlow) { self.containerState._closeFlow = undefined; if (childFlow) { closeFlow(); } // Note: this algorithm for moving events around is similar to the // algorithm when dealing with lazy lines in `writeToChild`. const indexBeforeExits = self.events.length; let indexBeforeFlow = indexBeforeExits; /** @type {Point | undefined} */ let point; // Find the flow chunk. while (indexBeforeFlow--) { if ( self.events[indexBeforeFlow][0] === 'exit' && self.events[indexBeforeFlow][1].type === 'chunkFlow' ) { point = self.events[indexBeforeFlow][1].end; break } } exitContainers(continued); // Fix positions. let index = indexBeforeExits; while (index < self.events.length) { self.events[index][1].end = Object.assign({}, point); index++; } // Inject the exits earlier (they’re still also at the end). splice$1( self.events, indexBeforeFlow + 1, 0, self.events.slice(indexBeforeExits) ); // Discard the duplicate exits. self.events.length = index; return checkNewContainers(code) } return start(code) } /** @type {State} */ function checkNewContainers(code) { // Next, after consuming the continuation markers for existing blocks, we // look for new block starts (e.g. `>` for a block quote). // If we encounter a new block start, we close any blocks unmatched in // step 1 before creating the new block as a child of the last matched // block. if (continued === stack.length) { // No need to `check` whether there’s a container, of `exitContainers` // would be moot. // We can instead immediately `attempt` to parse one. if (!childFlow) { return documentContinued(code) } // If we have concrete content, such as block HTML or fenced code, // we can’t have containers “pierce” into them, so we can immediately // start. if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) { return flowStart(code) } // If we do have flow, it could still be a blank line, // but we’d be interrupting it w/ a new container if there’s a current // construct. // To do: next major: remove `_gfmTableDynamicInterruptHack` (no longer // needed in micromark-extension-gfm-table@1.0.6). self.interrupt = Boolean( childFlow.currentConstruct && !childFlow._gfmTableDynamicInterruptHack ); } // Check if there is a new container. self.containerState = {}; return effects.check( containerConstruct, thereIsANewContainer, thereIsNoNewContainer )(code) } /** @type {State} */ function thereIsANewContainer(code) { if (childFlow) closeFlow(); exitContainers(continued); return documentContinued(code) } /** @type {State} */ function thereIsNoNewContainer(code) { self.parser.lazy[self.now().line] = continued !== stack.length; lineStartOffset = self.now().offset; return flowStart(code) } /** @type {State} */ function documentContinued(code) { // Try new containers. 
self.containerState = {}; return effects.attempt( containerConstruct, containerContinue, flowStart )(code) } /** @type {State} */ function containerContinue(code) { continued++; stack.push([self.currentConstruct, self.containerState]); // Try another. return documentContinued(code) } /** @type {State} */ function flowStart(code) { if (code === null) { if (childFlow) closeFlow(); exitContainers(0); effects.consume(code); return } childFlow = childFlow || self.parser.flow(self.now()); effects.enter('chunkFlow', { contentType: 'flow', previous: childToken, _tokenizer: childFlow }); return flowContinue(code) } /** @type {State} */ function flowContinue(code) { if (code === null) { writeToChild(effects.exit('chunkFlow'), true); exitContainers(0); effects.consume(code); return } if (markdownLineEnding(code)) { effects.consume(code); writeToChild(effects.exit('chunkFlow')); // Get ready for the next line. continued = 0; self.interrupt = undefined; return start } effects.consume(code); return flowContinue } /** * @param {Token} token * @param {boolean | undefined} [eof] * @returns {undefined} */ function writeToChild(token, eof) { const stream = self.sliceStream(token); if (eof) stream.push(null); token.previous = childToken; if (childToken) childToken.next = token; childToken = token; childFlow.defineSkip(token.start); childFlow.write(stream); // Alright, so we just added a lazy line: // // ```markdown // > a // b. // // Or: // // > ~~~c // d // // Or: // // > | e | // f // ``` // // The construct in the second example (fenced code) does not accept lazy // lines, so it marked itself as done at the end of its first line, and // then the content construct parses `d`. // Most constructs in markdown match on the first line: if the first line // forms a construct, a non-lazy line can’t “unmake” it. // // The construct in the third example is potentially a GFM table, and // those are *weird*. // It *could* be a table, from the first line, if the following line // matches a condition. // In this case, that second line is lazy, which “unmakes” the first line // and turns the whole into one content block. // // We’ve now parsed the non-lazy and the lazy line, and can figure out // whether the lazy line started a new flow block. // If it did, we exit the current containers between the two flow blocks. if (self.parser.lazy[token.start.line]) { let index = childFlow.events.length; while (index--) { if ( // The token starts before the line ending… childFlow.events[index][1].start.offset < lineStartOffset && // …and either is not ended yet… (!childFlow.events[index][1].end || // …or ends after it. childFlow.events[index][1].end.offset > lineStartOffset) ) { // Exit: there’s still something open, which means it’s a lazy line // part of something. return } } // Note: this algorithm for moving events around is similar to the // algorithm when closing flow in `documentContinue`. const indexBeforeExits = self.events.length; let indexBeforeFlow = indexBeforeExits; /** @type {boolean | undefined} */ let seen; /** @type {Point | undefined} */ let point; // Find the previous chunk (the one before the lazy line). while (indexBeforeFlow--) { if ( self.events[indexBeforeFlow][0] === 'exit' && self.events[indexBeforeFlow][1].type === 'chunkFlow' ) { if (seen) { point = self.events[indexBeforeFlow][1].end; break } seen = true; } } exitContainers(continued); // Fix positions. 
index = indexBeforeExits; while (index < self.events.length) { self.events[index][1].end = Object.assign({}, point); index++; } // Inject the exits earlier (they’re still also at the end). splice$1( self.events, indexBeforeFlow + 1, 0, self.events.slice(indexBeforeExits) ); // Discard the duplicate exits. self.events.length = index; } } /** * @param {number} size * @returns {undefined} */ function exitContainers(size) { let index = stack.length; // Exit open containers. while (index-- > size) { const entry = stack[index]; self.containerState = entry[1]; entry[0].exit.call(self, effects); } stack.length = size; } function closeFlow() { childFlow.write([null]); childToken = undefined; childFlow = undefined; self.containerState._closeFlow = undefined; } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeContainer(effects, ok, nok) { // Always populated by defaults. return factorySpace( effects, effects.attempt(this.parser.constructs.document, ok, nok), 'linePrefix', this.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4 ) } /** * @typedef {import('micromark-util-types').Code} Code */ /** * Classify whether a code represents whitespace, punctuation, or something * else. * * Used for attention (emphasis, strong), whose sequences can open or close * based on the class of surrounding characters. * * > 👉 **Note**: eof (`null`) is seen as whitespace. * * @param {Code} code * Code. * @returns {typeof constants.characterGroupWhitespace | typeof constants.characterGroupPunctuation | undefined} * Group. */ function classifyCharacter(code) { if ( code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code) ) { return 1 } if (unicodePunctuation(code)) { return 2 } } /** * @typedef {import('micromark-util-types').Event} Event * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext */ /** * Call all `resolveAll`s. * * @param {Array<{resolveAll?: Resolver | undefined}>} constructs * List of constructs, optionally with `resolveAll`s. * @param {Array} events * List of events. * @param {TokenizeContext} context * Context used by `tokenize`. * @returns {Array} * Changed events. */ function resolveAll(constructs, events, context) { /** @type {Array} */ const called = []; let index = -1; while (++index < constructs.length) { const resolve = constructs[index].resolveAll; if (resolve && !called.includes(resolve)) { events = resolve(events, context); called.push(resolve); } } return events } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Event} Event * @typedef {import('micromark-util-types').Point} Point * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const attention = { name: 'attention', tokenize: tokenizeAttention, resolveAll: resolveAllAttention }; /** * Take all events and resolve attention to emphasis or strong. 
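 *
 * For example, `**a**` resolves to strong and `*a*` to emphasis, while in
 * `**a*` only one marker of the opener can be used, leaving a literal `*`
 * followed by emphasized `a`.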
* * @type {Resolver} */ // eslint-disable-next-line complexity function resolveAllAttention(events, context) { let index = -1; /** @type {number} */ let open; /** @type {Token} */ let group; /** @type {Token} */ let text; /** @type {Token} */ let openingSequence; /** @type {Token} */ let closingSequence; /** @type {number} */ let use; /** @type {Array} */ let nextEvents; /** @type {number} */ let offset; // Walk through all events. // // Note: performance of this is fine on an mb of normal markdown, but it’s // a bottleneck for malicious stuff. while (++index < events.length) { // Find a token that can close. if (events[index][0] === 'enter' && events[index][1].type === 'attentionSequence' && events[index][1]._close) { open = index; // Now walk back to find an opener. while (open--) { // Find a token that can open the closer. if (events[open][0] === 'exit' && events[open][1].type === 'attentionSequence' && events[open][1]._open && // If the markers are the same: context.sliceSerialize(events[open][1]).charCodeAt(0) === context.sliceSerialize(events[index][1]).charCodeAt(0)) { // If the opening can close or the closing can open, // and the close size *is not* a multiple of three, // but the sum of the opening and closing size *is* multiple of three, // then don’t match. if ((events[open][1]._close || events[index][1]._open) && (events[index][1].end.offset - events[index][1].start.offset) % 3 && !((events[open][1].end.offset - events[open][1].start.offset + events[index][1].end.offset - events[index][1].start.offset) % 3)) { continue; } // Number of markers to use from the sequence. use = events[open][1].end.offset - events[open][1].start.offset > 1 && events[index][1].end.offset - events[index][1].start.offset > 1 ? 2 : 1; const start = Object.assign({}, events[open][1].end); const end = Object.assign({}, events[index][1].start); movePoint(start, -use); movePoint(end, use); openingSequence = { type: use > 1 ? "strongSequence" : "emphasisSequence", start, end: Object.assign({}, events[open][1].end) }; closingSequence = { type: use > 1 ? "strongSequence" : "emphasisSequence", start: Object.assign({}, events[index][1].start), end }; text = { type: use > 1 ? "strongText" : "emphasisText", start: Object.assign({}, events[open][1].end), end: Object.assign({}, events[index][1].start) }; group = { type: use > 1 ? "strong" : "emphasis", start: Object.assign({}, openingSequence.start), end: Object.assign({}, closingSequence.end) }; events[open][1].end = Object.assign({}, openingSequence.start); events[index][1].start = Object.assign({}, closingSequence.end); nextEvents = []; // If there are more markers in the opening, add them before. if (events[open][1].end.offset - events[open][1].start.offset) { nextEvents = push(nextEvents, [['enter', events[open][1], context], ['exit', events[open][1], context]]); } // Opening. nextEvents = push(nextEvents, [['enter', group, context], ['enter', openingSequence, context], ['exit', openingSequence, context], ['enter', text, context]]); // Always populated by defaults. // Between. nextEvents = push(nextEvents, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + 1, index), context)); // Closing. nextEvents = push(nextEvents, [['exit', text, context], ['enter', closingSequence, context], ['exit', closingSequence, context], ['exit', group, context]]); // If there are more markers in the closing, add them after. 
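// For instance, in `*a**` the closer has one marker left over after the
// emphasis is built; that leftover is re-emitted as a plain sequence after
// the new group (mirroring the opener case handled above).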
if (events[index][1].end.offset - events[index][1].start.offset) { offset = 2; nextEvents = push(nextEvents, [['enter', events[index][1], context], ['exit', events[index][1], context]]); } else { offset = 0; } splice$1(events, open - 1, index - open + 3, nextEvents); index = open + nextEvents.length - offset - 2; break; } } } } // Remove remaining sequences. index = -1; while (++index < events.length) { if (events[index][1].type === 'attentionSequence') { events[index][1].type = 'data'; } } return events; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeAttention(effects, ok) { const attentionMarkers = this.parser.constructs.attentionMarkers.null; const previous = this.previous; const before = classifyCharacter(previous); /** @type {NonNullable} */ let marker; return start; /** * Before a sequence. * * ```markdown * > | ** * ^ * ``` * * @type {State} */ function start(code) { marker = code; effects.enter('attentionSequence'); return inside(code); } /** * In a sequence. * * ```markdown * > | ** * ^^ * ``` * * @type {State} */ function inside(code) { if (code === marker) { effects.consume(code); return inside; } const token = effects.exit('attentionSequence'); // To do: next major: move this to resolver, just like `markdown-rs`. const after = classifyCharacter(code); // Always populated by defaults. const open = !after || after === 2 && before || attentionMarkers.includes(code); const close = !before || before === 2 && after || attentionMarkers.includes(previous); token._open = Boolean(marker === 42 ? open : open && (before || !close)); token._close = Boolean(marker === 42 ? close : close && (after || !open)); return ok(code); } } /** * Move a point a bit. * * Note: `move` only works inside lines! It’s not possible to move past other * chunks (replacement characters, tabs, or line endings). * * @param {Point} point * @param {number} offset * @returns {undefined} */ function movePoint(point, offset) { point.column += offset; point.offset += offset; point._bufferIndex += offset; } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const autolink = { name: 'autolink', tokenize: tokenizeAutolink }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeAutolink(effects, ok, nok) { let size = 0; return start; /** * Start of an autolink. * * ```markdown * > | ab * ^ * > | ab * ^ * ``` * * @type {State} */ function start(code) { effects.enter("autolink"); effects.enter("autolinkMarker"); effects.consume(code); effects.exit("autolinkMarker"); effects.enter("autolinkProtocol"); return open; } /** * After `<`, at protocol or atext. * * ```markdown * > | ab * ^ * > | ab * ^ * ``` * * @type {State} */ function open(code) { if (asciiAlpha(code)) { effects.consume(code); return schemeOrEmailAtext; } if (code === 64) { return nok(code); } return emailAtext(code); } /** * At second byte of protocol or atext. * * ```markdown * > | ab * ^ * > | ab * ^ * ``` * * @type {State} */ function schemeOrEmailAtext(code) { // ASCII alphanumeric and `+`, `-`, and `.`. if (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) { // Count the previous alphabetical from `open` too. size = 1; return schemeInsideOrEmailAtext(code); } return emailAtext(code); } /** * In ambiguous protocol or atext. 
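 * That is, after something like `<ht` this could still turn into a scheme
 * (`<https://…>`) or an email local part (`<ht@example.com>`), so both stay
 * possible until a `:` or `@` decides it.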
* * ```markdown * > | ab * ^ * > | ab * ^ * ``` * * @type {State} */ function schemeInsideOrEmailAtext(code) { if (code === 58) { effects.consume(code); size = 0; return urlInside; } // ASCII alphanumeric and `+`, `-`, and `.`. if ((code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) && size++ < 32) { effects.consume(code); return schemeInsideOrEmailAtext; } size = 0; return emailAtext(code); } /** * After protocol, in URL. * * ```markdown * > | ab * ^ * ``` * * @type {State} */ function urlInside(code) { if (code === 62) { effects.exit("autolinkProtocol"); effects.enter("autolinkMarker"); effects.consume(code); effects.exit("autolinkMarker"); effects.exit("autolink"); return ok; } // ASCII control, space, or `<`. if (code === null || code === 32 || code === 60 || asciiControl(code)) { return nok(code); } effects.consume(code); return urlInside; } /** * In email atext. * * ```markdown * > | ab * ^ * ``` * * @type {State} */ function emailAtext(code) { if (code === 64) { effects.consume(code); return emailAtSignOrDot; } if (asciiAtext(code)) { effects.consume(code); return emailAtext; } return nok(code); } /** * In label, after at-sign or dot. * * ```markdown * > | ab * ^ ^ * ``` * * @type {State} */ function emailAtSignOrDot(code) { return asciiAlphanumeric(code) ? emailLabel(code) : nok(code); } /** * In label, where `.` and `>` are allowed. * * ```markdown * > | ab * ^ * ``` * * @type {State} */ function emailLabel(code) { if (code === 46) { effects.consume(code); size = 0; return emailAtSignOrDot; } if (code === 62) { // Exit, then change the token type. effects.exit("autolinkProtocol").type = "autolinkEmail"; effects.enter("autolinkMarker"); effects.consume(code); effects.exit("autolinkMarker"); effects.exit("autolink"); return ok; } return emailValue(code); } /** * In label, where `.` and `>` are *not* allowed. * * Though, this is also used in `emailLabel` to parse other values. * * ```markdown * > | ab * ^ * ``` * * @type {State} */ function emailValue(code) { // ASCII alphanumeric or `-`. if ((code === 45 || asciiAlphanumeric(code)) && size++ < 63) { const next = code === 45 ? emailValue : emailLabel; effects.consume(code); return next; } return nok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const blankLine = { tokenize: tokenizeBlankLine, partial: true }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeBlankLine(effects, ok, nok) { return start; /** * Start of blank line. * * > 👉 **Note**: `␠` represents a space character. * * ```markdown * > | ␠␠␊ * ^ * > | ␊ * ^ * ``` * * @type {State} */ function start(code) { return markdownSpace(code) ? factorySpace(effects, after, "linePrefix")(code) : after(code); } /** * At eof/eol, after optional whitespace. * * > 👉 **Note**: `␠` represents a space character. * * ```markdown * > | ␠␠␊ * ^ * > | ␊ * ^ * ``` * * @type {State} */ function after(code) { return code === null || markdownLineEnding(code) ? 
ok(code) : nok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Exiter} Exiter * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const blockQuote = { name: 'blockQuote', tokenize: tokenizeBlockQuoteStart, continuation: { tokenize: tokenizeBlockQuoteContinuation }, exit }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeBlockQuoteStart(effects, ok, nok) { const self = this; return start; /** * Start of block quote. * * ```markdown * > | > a * ^ * ``` * * @type {State} */ function start(code) { if (code === 62) { const state = self.containerState; if (!state.open) { effects.enter("blockQuote", { _container: true }); state.open = true; } effects.enter("blockQuotePrefix"); effects.enter("blockQuoteMarker"); effects.consume(code); effects.exit("blockQuoteMarker"); return after; } return nok(code); } /** * After `>`, before optional whitespace. * * ```markdown * > | > a * ^ * ``` * * @type {State} */ function after(code) { if (markdownSpace(code)) { effects.enter("blockQuotePrefixWhitespace"); effects.consume(code); effects.exit("blockQuotePrefixWhitespace"); effects.exit("blockQuotePrefix"); return ok; } effects.exit("blockQuotePrefix"); return ok(code); } } /** * Start of block quote continuation. * * ```markdown * | > a * > | > b * ^ * ``` * * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeBlockQuoteContinuation(effects, ok, nok) { const self = this; return contStart; /** * Start of block quote continuation. * * Also used to parse the first block quote opening. * * ```markdown * | > a * > | > b * ^ * ``` * * @type {State} */ function contStart(code) { if (markdownSpace(code)) { // Always populated by defaults. return factorySpace(effects, contBefore, "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code); } return contBefore(code); } /** * At `>`, after optional whitespace. * * Also used to parse the first block quote opening. * * ```markdown * | > a * > | > b * ^ * ``` * * @type {State} */ function contBefore(code) { return effects.attempt(blockQuote, ok, nok)(code); } } /** @type {Exiter} */ function exit(effects) { effects.exit("blockQuote"); } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const characterEscape = { name: 'characterEscape', tokenize: tokenizeCharacterEscape }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeCharacterEscape(effects, ok, nok) { return start; /** * Start of character escape. * * ```markdown * > | a\*b * ^ * ``` * * @type {State} */ function start(code) { effects.enter("characterEscape"); effects.enter("escapeMarker"); effects.consume(code); effects.exit("escapeMarker"); return inside; } /** * After `\`, at punctuation. * * ```markdown * > | a\*b * ^ * ``` * * @type {State} */ function inside(code) { // ASCII punctuation. 
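// Per CommonMark that is any of !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~; a backslash
// before anything else is not an escape and stays literal text.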
if (asciiPunctuation(code)) { effects.enter("characterEscapeValue"); effects.consume(code); effects.exit("characterEscapeValue"); effects.exit("characterEscape"); return ok; } return nok(code); } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const characterReference = { name: 'characterReference', tokenize: tokenizeCharacterReference }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeCharacterReference(effects, ok, nok) { const self = this; let size = 0; /** @type {number} */ let max; /** @type {(code: Code) => boolean} */ let test; return start; /** * Start of character reference. * * ```markdown * > | a&b * ^ * > | a{b * ^ * > | a b * ^ * ``` * * @type {State} */ function start(code) { effects.enter("characterReference"); effects.enter("characterReferenceMarker"); effects.consume(code); effects.exit("characterReferenceMarker"); return open; } /** * After `&`, at `#` for numeric references or alphanumeric for named * references. * * ```markdown * > | a&b * ^ * > | a{b * ^ * > | a b * ^ * ``` * * @type {State} */ function open(code) { if (code === 35) { effects.enter("characterReferenceMarkerNumeric"); effects.consume(code); effects.exit("characterReferenceMarkerNumeric"); return numeric; } effects.enter("characterReferenceValue"); max = 31; test = asciiAlphanumeric; return value(code); } /** * After `#`, at `x` for hexadecimals or digit for decimals. * * ```markdown * > | a{b * ^ * > | a b * ^ * ``` * * @type {State} */ function numeric(code) { if (code === 88 || code === 120) { effects.enter("characterReferenceMarkerHexadecimal"); effects.consume(code); effects.exit("characterReferenceMarkerHexadecimal"); effects.enter("characterReferenceValue"); max = 6; test = asciiHexDigit; return value; } effects.enter("characterReferenceValue"); max = 7; test = asciiDigit; return value(code); } /** * After markers (`&#x`, `&#`, or `&`), in value, before `;`. * * The character reference kind defines what and how many characters are * allowed. * * ```markdown * > | a&b * ^^^ * > | a{b * ^^^ * > | a b * ^ * ``` * * @type {State} */ function value(code) { if (code === 59 && size) { const token = effects.exit("characterReferenceValue"); if (test === asciiAlphanumeric && !decodeNamedCharacterReference(self.sliceSerialize(token))) { return nok(code); } // To do: `markdown-rs` uses a different name: // `CharacterReferenceMarkerSemi`. 
effects.enter("characterReferenceMarker"); effects.consume(code); effects.exit("characterReferenceMarker"); effects.exit("characterReference"); return ok; } if (test(code) && size++ < max) { effects.consume(code); return value; } return nok(code); } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const nonLazyContinuation = { tokenize: tokenizeNonLazyContinuation, partial: true }; /** @type {Construct} */ const codeFenced = { name: 'codeFenced', tokenize: tokenizeCodeFenced, concrete: true }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeCodeFenced(effects, ok, nok) { const self = this; /** @type {Construct} */ const closeStart = { tokenize: tokenizeCloseStart, partial: true }; let initialPrefix = 0; let sizeOpen = 0; /** @type {NonNullable} */ let marker; return start; /** * Start of code. * * ```markdown * > | ~~~js * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function start(code) { // To do: parse whitespace like `markdown-rs`. return beforeSequenceOpen(code); } /** * In opening fence, after prefix, at sequence. * * ```markdown * > | ~~~js * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function beforeSequenceOpen(code) { const tail = self.events[self.events.length - 1]; initialPrefix = tail && tail[1].type === "linePrefix" ? tail[2].sliceSerialize(tail[1], true).length : 0; marker = code; effects.enter("codeFenced"); effects.enter("codeFencedFence"); effects.enter("codeFencedFenceSequence"); return sequenceOpen(code); } /** * In opening fence sequence. * * ```markdown * > | ~~~js * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function sequenceOpen(code) { if (code === marker) { sizeOpen++; effects.consume(code); return sequenceOpen; } if (sizeOpen < 3) { return nok(code); } effects.exit("codeFencedFenceSequence"); return markdownSpace(code) ? factorySpace(effects, infoBefore, "whitespace")(code) : infoBefore(code); } /** * In opening fence, after the sequence (and optional whitespace), before info. * * ```markdown * > | ~~~js * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function infoBefore(code) { if (code === null || markdownLineEnding(code)) { effects.exit("codeFencedFence"); return self.interrupt ? ok(code) : effects.check(nonLazyContinuation, atNonLazyBreak, after)(code); } effects.enter("codeFencedFenceInfo"); effects.enter("chunkString", { contentType: "string" }); return info(code); } /** * In info. * * ```markdown * > | ~~~js * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function info(code) { if (code === null || markdownLineEnding(code)) { effects.exit("chunkString"); effects.exit("codeFencedFenceInfo"); return infoBefore(code); } if (markdownSpace(code)) { effects.exit("chunkString"); effects.exit("codeFencedFenceInfo"); return factorySpace(effects, metaBefore, "whitespace")(code); } if (code === 96 && code === marker) { return nok(code); } effects.consume(code); return info; } /** * In opening fence, after info and whitespace, before meta. 
* * ```markdown * > | ~~~js eval * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function metaBefore(code) { if (code === null || markdownLineEnding(code)) { return infoBefore(code); } effects.enter("codeFencedFenceMeta"); effects.enter("chunkString", { contentType: "string" }); return meta(code); } /** * In meta. * * ```markdown * > | ~~~js eval * ^ * | alert(1) * | ~~~ * ``` * * @type {State} */ function meta(code) { if (code === null || markdownLineEnding(code)) { effects.exit("chunkString"); effects.exit("codeFencedFenceMeta"); return infoBefore(code); } if (code === 96 && code === marker) { return nok(code); } effects.consume(code); return meta; } /** * At eol/eof in code, before a non-lazy closing fence or content. * * ```markdown * > | ~~~js * ^ * > | alert(1) * ^ * | ~~~ * ``` * * @type {State} */ function atNonLazyBreak(code) { return effects.attempt(closeStart, after, contentBefore)(code); } /** * Before code content, not a closing fence, at eol. * * ```markdown * | ~~~js * > | alert(1) * ^ * | ~~~ * ``` * * @type {State} */ function contentBefore(code) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return contentStart; } /** * Before code content, not a closing fence. * * ```markdown * | ~~~js * > | alert(1) * ^ * | ~~~ * ``` * * @type {State} */ function contentStart(code) { return initialPrefix > 0 && markdownSpace(code) ? factorySpace(effects, beforeContentChunk, "linePrefix", initialPrefix + 1)(code) : beforeContentChunk(code); } /** * Before code content, after optional prefix. * * ```markdown * | ~~~js * > | alert(1) * ^ * | ~~~ * ``` * * @type {State} */ function beforeContentChunk(code) { if (code === null || markdownLineEnding(code)) { return effects.check(nonLazyContinuation, atNonLazyBreak, after)(code); } effects.enter("codeFlowValue"); return contentChunk(code); } /** * In code content. * * ```markdown * | ~~~js * > | alert(1) * ^^^^^^^^ * | ~~~ * ``` * * @type {State} */ function contentChunk(code) { if (code === null || markdownLineEnding(code)) { effects.exit("codeFlowValue"); return beforeContentChunk(code); } effects.consume(code); return contentChunk; } /** * After code. * * ```markdown * | ~~~js * | alert(1) * > | ~~~ * ^ * ``` * * @type {State} */ function after(code) { effects.exit("codeFenced"); return ok(code); } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeCloseStart(effects, ok, nok) { let size = 0; return startBefore; /** * * * @type {State} */ function startBefore(code) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return start; } /** * Before closing fence, at optional whitespace. * * ```markdown * | ~~~js * | alert(1) * > | ~~~ * ^ * ``` * * @type {State} */ function start(code) { // Always populated by defaults. // To do: `enter` here or in next state? effects.enter("codeFencedFence"); return markdownSpace(code) ? factorySpace(effects, beforeSequenceClose, "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code) : beforeSequenceClose(code); } /** * In closing fence, after optional whitespace, at sequence. * * ```markdown * | ~~~js * | alert(1) * > | ~~~ * ^ * ``` * * @type {State} */ function beforeSequenceClose(code) { if (code === marker) { effects.enter("codeFencedFenceSequence"); return sequenceClose(code); } return nok(code); } /** * In closing fence sequence. 
* * ```markdown * | ~~~js * | alert(1) * > | ~~~ * ^ * ``` * * @type {State} */ function sequenceClose(code) { if (code === marker) { size++; effects.consume(code); return sequenceClose; } if (size >= sizeOpen) { effects.exit("codeFencedFenceSequence"); return markdownSpace(code) ? factorySpace(effects, sequenceCloseAfter, "whitespace")(code) : sequenceCloseAfter(code); } return nok(code); } /** * After closing fence sequence, after optional whitespace. * * ```markdown * | ~~~js * | alert(1) * > | ~~~ * ^ * ``` * * @type {State} */ function sequenceCloseAfter(code) { if (code === null || markdownLineEnding(code)) { effects.exit("codeFencedFence"); return ok(code); } return nok(code); } } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeNonLazyContinuation(effects, ok, nok) { const self = this; return start; /** * * * @type {State} */ function start(code) { if (code === null) { return nok(code); } effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return lineStart; } /** * * * @type {State} */ function lineStart(code) { return self.parser.lazy[self.now().line] ? nok(code) : ok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const codeIndented = { name: 'codeIndented', tokenize: tokenizeCodeIndented }; /** @type {Construct} */ const furtherStart = { tokenize: tokenizeFurtherStart, partial: true }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeCodeIndented(effects, ok, nok) { const self = this; return start; /** * Start of code (indented). * * > **Parsing note**: it is not needed to check if this first line is a * > filled line (that it has a non-whitespace character), because blank lines * > are parsed already, so we never run into that. * * ```markdown * > | aaa * ^ * ``` * * @type {State} */ function start(code) { // To do: manually check if interrupting like `markdown-rs`. effects.enter("codeIndented"); // To do: use an improved `space_or_tab` function like `markdown-rs`, // so that we can drop the next state. return factorySpace(effects, afterPrefix, "linePrefix", 4 + 1)(code); } /** * At start, after 1 or 4 spaces. * * ```markdown * > | aaa * ^ * ``` * * @type {State} */ function afterPrefix(code) { const tail = self.events[self.events.length - 1]; return tail && tail[1].type === "linePrefix" && tail[2].sliceSerialize(tail[1], true).length >= 4 ? atBreak(code) : nok(code); } /** * At a break. * * ```markdown * > | aaa * ^ ^ * ``` * * @type {State} */ function atBreak(code) { if (code === null) { return after(code); } if (markdownLineEnding(code)) { return effects.attempt(furtherStart, atBreak, after)(code); } effects.enter("codeFlowValue"); return inside(code); } /** * In code content. * * ```markdown * > | aaa * ^^^^ * ``` * * @type {State} */ function inside(code) { if (code === null || markdownLineEnding(code)) { effects.exit("codeFlowValue"); return atBreak(code); } effects.consume(code); return inside; } /** @type {State} */ function after(code) { effects.exit("codeIndented"); // To do: allow interrupting like `markdown-rs`. // Feel free to interrupt. 
// tokenizer.interrupt = false return ok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeFurtherStart(effects, ok, nok) { const self = this; return furtherStart; /** * At eol, trying to parse another indent. * * ```markdown * > | aaa * ^ * | bbb * ``` * * @type {State} */ function furtherStart(code) { // To do: improve `lazy` / `pierce` handling. // If this is a lazy line, it can’t be code. if (self.parser.lazy[self.now().line]) { return nok(code); } if (markdownLineEnding(code)) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return furtherStart; } // To do: the code here in `micromark-js` is a bit different from // `markdown-rs` because there it can attempt spaces. // We can’t yet. // // To do: use an improved `space_or_tab` function like `markdown-rs`, // so that we can drop the next state. return factorySpace(effects, afterPrefix, "linePrefix", 4 + 1)(code); } /** * At start, after 1 or 4 spaces. * * ```markdown * > | aaa * ^ * ``` * * @type {State} */ function afterPrefix(code) { const tail = self.events[self.events.length - 1]; return tail && tail[1].type === "linePrefix" && tail[2].sliceSerialize(tail[1], true).length >= 4 ? ok(code) : markdownLineEnding(code) ? furtherStart(code) : nok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Previous} Previous * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const codeText = { name: 'codeText', tokenize: tokenizeCodeText, resolve: resolveCodeText, previous }; // To do: next major: don’t resolve, like `markdown-rs`. /** @type {Resolver} */ function resolveCodeText(events) { let tailExitIndex = events.length - 4; let headEnterIndex = 3; /** @type {number} */ let index; /** @type {number | undefined} */ let enter; // If we start and end with an EOL or a space. if ((events[headEnterIndex][1].type === "lineEnding" || events[headEnterIndex][1].type === 'space') && (events[tailExitIndex][1].type === "lineEnding" || events[tailExitIndex][1].type === 'space')) { index = headEnterIndex; // And we have data. while (++index < tailExitIndex) { if (events[index][1].type === "codeTextData") { // Then we have padding. events[headEnterIndex][1].type = "codeTextPadding"; events[tailExitIndex][1].type = "codeTextPadding"; headEnterIndex += 2; tailExitIndex -= 2; break; } } } // Merge adjacent spaces and data. index = headEnterIndex - 1; tailExitIndex++; while (++index <= tailExitIndex) { if (enter === undefined) { if (index !== tailExitIndex && events[index][1].type !== "lineEnding") { enter = index; } } else if (index === tailExitIndex || events[index][1].type === "lineEnding") { events[enter][1].type = "codeTextData"; if (index !== enter + 2) { events[enter][1].end = events[index - 1][1].end; events.splice(enter + 2, index - enter - 2); tailExitIndex -= index - enter - 2; index = enter + 2; } enter = undefined; } } return events; } /** * @this {TokenizeContext} * @type {Previous} */ function previous(code) { // If there is a previous code, there will always be a tail. 
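// That is: an unescaped backtick (96) directly before the opening sequence
// would have belonged to that sequence itself, so only an escaped backtick
// may precede a code span.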
return code !== 96 || this.events[this.events.length - 1][1].type === "characterEscape"; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeCodeText(effects, ok, nok) { let sizeOpen = 0; /** @type {number} */ let size; /** @type {Token} */ let token; return start; /** * Start of code (text). * * ```markdown * > | `a` * ^ * > | \`a` * ^ * ``` * * @type {State} */ function start(code) { effects.enter("codeText"); effects.enter("codeTextSequence"); return sequenceOpen(code); } /** * In opening sequence. * * ```markdown * > | `a` * ^ * ``` * * @type {State} */ function sequenceOpen(code) { if (code === 96) { effects.consume(code); sizeOpen++; return sequenceOpen; } effects.exit("codeTextSequence"); return between(code); } /** * Between something and something else. * * ```markdown * > | `a` * ^^ * ``` * * @type {State} */ function between(code) { // EOF. if (code === null) { return nok(code); } // To do: next major: don’t do spaces in resolve, but when compiling, // like `markdown-rs`. // Tabs don’t work, and virtual spaces don’t make sense. if (code === 32) { effects.enter('space'); effects.consume(code); effects.exit('space'); return between; } // Closing fence? Could also be data. if (code === 96) { token = effects.enter("codeTextSequence"); size = 0; return sequenceClose(code); } if (markdownLineEnding(code)) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return between; } // Data. effects.enter("codeTextData"); return data(code); } /** * In data. * * ```markdown * > | `a` * ^ * ``` * * @type {State} */ function data(code) { if (code === null || code === 32 || code === 96 || markdownLineEnding(code)) { effects.exit("codeTextData"); return between(code); } effects.consume(code); return data; } /** * In closing sequence. * * ```markdown * > | `a` * ^ * ``` * * @type {State} */ function sequenceClose(code) { // More. if (code === 96) { effects.consume(code); size++; return sequenceClose; } // Done! if (size === sizeOpen) { effects.exit("codeTextSequence"); effects.exit("codeText"); return ok(code); } // More or less accents: mark as data. token.type = "codeTextData"; return data(code); } } /** * Some of the internal operations of micromark do lots of editing * operations on very large arrays. This runs into problems with two * properties of most circa-2020 JavaScript interpreters: * * - Array-length modifications at the high end of an array (push/pop) are * expected to be common and are implemented in (amortized) time * proportional to the number of elements added or removed, whereas * other operations (shift/unshift and splice) are much less efficient. * - Function arguments are passed on the stack, so adding tens of thousands * of elements to an array with `arr.push[...newElements]` will frequently * cause stack overflows. (see ) * * SpliceBuffers are an implementation of gap buffers, which are a * generalization of the "queue made of two stacks" idea. The splice buffer * maintains a cursor, and moving the cursor has cost proportional to the * distance the cursor moves, but inserting, deleting, or splicing in * new information at the cursor is as efficient as the push/pop operation. * This allows for an efficient sequence of splices (or pushes, pops, shifts, * or unshifts) as long such edits happen at the same part of the array or * generally sweep through the array from the beginning to the end. 
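 *
 * A minimal usage sketch (illustrative only; the values are made up):
 *
 *   const buffer = new SpliceBuffer([1, 2, 3, 4])
 *   buffer.splice(1, 2, [9])      // cursor moves left to index 1, then edits cheaply
 *   buffer.splice(2, 0, [8, 7])   // cursor is already nearby, so this is cheap too
 *   buffer.slice(0)               // -> [1, 9, 8, 7, 4]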
* * The interface for splice buffers also supports large numbers of inputs by * passing a single array argument rather passing multiple arguments on the * function call stack. * * @template T * Item type. */ class SpliceBuffer { /** * @param {ReadonlyArray | null | undefined} [initial] * Initial items (optional). * @returns * Splice buffer. */ constructor(initial) { /** @type {Array} */ this.left = initial ? [...initial] : []; /** @type {Array} */ this.right = []; } /** * Array access; * does not move the cursor. * * @param {number} index * Index. * @return {T} * Item. */ get(index) { if (index < 0 || index >= this.left.length + this.right.length) { throw new RangeError('Cannot access index `' + index + '` in a splice buffer of size `' + (this.left.length + this.right.length) + '`'); } if (index < this.left.length) return this.left[index]; return this.right[this.right.length - index + this.left.length - 1]; } /** * The length of the splice buffer, one greater than the largest index in the * array. */ get length() { return this.left.length + this.right.length; } /** * Remove and return `list[0]`; * moves the cursor to `0`. * * @returns {T | undefined} * Item, optional. */ shift() { this.setCursor(0); return this.right.pop(); } /** * Slice the buffer to get an array; * does not move the cursor. * * @param {number} start * Start. * @param {number | null | undefined} [end] * End (optional). * @returns {Array} * Array of items. */ slice(start, end) { /** @type {number} */ const stop = end === null || end === undefined ? Number.POSITIVE_INFINITY : end; if (stop < this.left.length) { return this.left.slice(start, stop); } if (start > this.left.length) { return this.right.slice(this.right.length - stop + this.left.length, this.right.length - start + this.left.length).reverse(); } return this.left.slice(start).concat(this.right.slice(this.right.length - stop + this.left.length).reverse()); } /** * Mimics the behavior of Array.prototype.splice() except for the change of * interface necessary to avoid segfaults when patching in very large arrays. * * This operation moves cursor is moved to `start` and results in the cursor * placed after any inserted items. * * @param {number} start * Start; * zero-based index at which to start changing the array; * negative numbers count backwards from the end of the array and values * that are out-of bounds are clamped to the appropriate end of the array. * @param {number | null | undefined} [deleteCount=0] * Delete count (default: `0`); * maximum number of elements to delete, starting from start. * @param {Array | null | undefined} [items=[]] * Items to include in place of the deleted items (default: `[]`). * @return {Array} * Any removed items. */ splice(start, deleteCount, items) { /** @type {number} */ const count = deleteCount || 0; this.setCursor(Math.trunc(start)); const removed = this.right.splice(this.right.length - count, Number.POSITIVE_INFINITY); if (items) chunkedPush(this.left, items); return removed.reverse(); } /** * Remove and return the highest-numbered item in the array, so * `list[list.length - 1]`; * Moves the cursor to `length`. * * @returns {T | undefined} * Item, optional. */ pop() { this.setCursor(Number.POSITIVE_INFINITY); return this.left.pop(); } /** * Inserts a single item to the high-numbered side of the array; * moves the cursor to `length`. * * @param {T} item * Item. * @returns {undefined} * Nothing. 
*/ push(item) { this.setCursor(Number.POSITIVE_INFINITY); this.left.push(item); } /** * Inserts many items to the high-numbered side of the array. * Moves the cursor to `length`. * * @param {Array} items * Items. * @returns {undefined} * Nothing. */ pushMany(items) { this.setCursor(Number.POSITIVE_INFINITY); chunkedPush(this.left, items); } /** * Inserts a single item to the low-numbered side of the array; * Moves the cursor to `0`. * * @param {T} item * Item. * @returns {undefined} * Nothing. */ unshift(item) { this.setCursor(0); this.right.push(item); } /** * Inserts many items to the low-numbered side of the array; * moves the cursor to `0`. * * @param {Array} items * Items. * @returns {undefined} * Nothing. */ unshiftMany(items) { this.setCursor(0); chunkedPush(this.right, items.reverse()); } /** * Move the cursor to a specific position in the array. Requires * time proportional to the distance moved. * * If `n < 0`, the cursor will end up at the beginning. * If `n > length`, the cursor will end up at the end. * * @param {number} n * Position. * @return {undefined} * Nothing. */ setCursor(n) { if (n === this.left.length || n > this.left.length && this.right.length === 0 || n < 0 && this.left.length === 0) return; if (n < this.left.length) { // Move cursor to the this.left const removed = this.left.splice(n, Number.POSITIVE_INFINITY); chunkedPush(this.right, removed.reverse()); } else { // Move cursor to the this.right const removed = this.right.splice(this.left.length + this.right.length - n, Number.POSITIVE_INFINITY); chunkedPush(this.left, removed.reverse()); } } } /** * Avoid stack overflow by pushing items onto the stack in segments * * @template T * Item type. * @param {Array} list * List to inject into. * @param {ReadonlyArray} right * Items to inject. * @return {undefined} * Nothing. */ function chunkedPush(list, right) { /** @type {number} */ let chunkStart = 0; if (right.length < 10000) { list.push(...right); } else { while (chunkStart < right.length) { list.push(...right.slice(chunkStart, chunkStart + 10000)); chunkStart += 10000; } } } /** * @typedef {import('micromark-util-types').Chunk} Chunk * @typedef {import('micromark-util-types').Event} Event * @typedef {import('micromark-util-types').Token} Token */ /** * Tokenize subcontent. * * @param {Array} eventsArray * List of events. * @returns {boolean} * Whether subtokens were found. */ // eslint-disable-next-line complexity function subtokenize(eventsArray) { /** @type {Record} */ const jumps = {}; let index = -1; /** @type {Event} */ let event; /** @type {number | undefined} */ let lineIndex; /** @type {number} */ let otherIndex; /** @type {Event} */ let otherEvent; /** @type {Array} */ let parameters; /** @type {Array} */ let subevents; /** @type {boolean | undefined} */ let more; const events = new SpliceBuffer(eventsArray); while (++index < events.length) { while (index in jumps) { index = jumps[index]; } event = events.get(index); // Add a hook for the GFM tasklist extension, which needs to know if text // is in the first content of a list item. 
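// For example, in `* [x] done` the checkbox sits in the first text chunk
// after the list item prefix; the flag set below lets the tasklist extension
// recognise it there and nowhere else.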
if (index && event[1].type === "chunkFlow" && events.get(index - 1)[1].type === "listItemPrefix") { subevents = event[1]._tokenizer.events; otherIndex = 0; if (otherIndex < subevents.length && subevents[otherIndex][1].type === "lineEndingBlank") { otherIndex += 2; } if (otherIndex < subevents.length && subevents[otherIndex][1].type === "content") { while (++otherIndex < subevents.length) { if (subevents[otherIndex][1].type === "content") { break; } if (subevents[otherIndex][1].type === "chunkText") { subevents[otherIndex][1]._isInFirstContentOfListItem = true; otherIndex++; } } } } // Enter. if (event[0] === 'enter') { if (event[1].contentType) { Object.assign(jumps, subcontent(events, index)); index = jumps[index]; more = true; } } // Exit. else if (event[1]._container) { otherIndex = index; lineIndex = undefined; while (otherIndex--) { otherEvent = events.get(otherIndex); if (otherEvent[1].type === "lineEnding" || otherEvent[1].type === "lineEndingBlank") { if (otherEvent[0] === 'enter') { if (lineIndex) { events.get(lineIndex)[1].type = "lineEndingBlank"; } otherEvent[1].type = "lineEnding"; lineIndex = otherIndex; } } else { break; } } if (lineIndex) { // Fix position. event[1].end = Object.assign({}, events.get(lineIndex)[1].start); // Switch container exit w/ line endings. parameters = events.slice(lineIndex, index); parameters.unshift(event); events.splice(lineIndex, index - lineIndex + 1, parameters); } } } // The changes to the `events` buffer must be copied back into the eventsArray splice$1(eventsArray, 0, Number.POSITIVE_INFINITY, events.slice(0)); return !more; } /** * Tokenize embedded tokens. * * @param {SpliceBuffer} events * @param {number} eventIndex * @returns {Record} */ function subcontent(events, eventIndex) { const token = events.get(eventIndex)[1]; const context = events.get(eventIndex)[2]; let startPosition = eventIndex - 1; /** @type {Array} */ const startPositions = []; const tokenizer = token._tokenizer || context.parser[token.contentType](token.start); const childEvents = tokenizer.events; /** @type {Array<[number, number]>} */ const jumps = []; /** @type {Record} */ const gaps = {}; /** @type {Array} */ let stream; /** @type {Token | undefined} */ let previous; let index = -1; /** @type {Token | undefined} */ let current = token; let adjust = 0; let start = 0; const breaks = [start]; // Loop forward through the linked tokens to pass them in order to the // subtokenizer. while (current) { // Find the position of the event for this token. while (events.get(++startPosition)[1] !== current) { // Empty. } startPositions.push(startPosition); if (!current._tokenizer) { stream = context.sliceStream(current); if (!current.next) { stream.push(null); } if (previous) { tokenizer.defineSkip(current.start); } if (current._isInFirstContentOfListItem) { tokenizer._gfmTasklistFirstContentOfListItem = true; } tokenizer.write(stream); if (current._isInFirstContentOfListItem) { tokenizer._gfmTasklistFirstContentOfListItem = undefined; } } // Unravel the next token. previous = current; current = current.next; } // Now, loop back through all events (and linked tokens), to figure out which // parts belong where. current = token; while (++index < childEvents.length) { if ( // Find a void token that includes a break. childEvents[index][0] === 'exit' && childEvents[index - 1][0] === 'enter' && childEvents[index][1].type === childEvents[index - 1][1].type && childEvents[index][1].start.line !== childEvents[index][1].end.line) { start = index + 1; breaks.push(start); // Help GC. 
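// This break ends the slice of events belonging to `current`, so its
// references can be dropped and the walk moves on to the next linked token.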
current._tokenizer = undefined; current.previous = undefined; current = current.next; } } // Help GC. tokenizer.events = []; // If there’s one more token (which is the cases for lines that end in an // EOF), that’s perfect: the last point we found starts it. // If there isn’t then make sure any remaining content is added to it. if (current) { // Help GC. current._tokenizer = undefined; current.previous = undefined; } else { breaks.pop(); } // Now splice the events from the subtokenizer into the current events, // moving back to front so that splice indices aren’t affected. index = breaks.length; while (index--) { const slice = childEvents.slice(breaks[index], breaks[index + 1]); const start = startPositions.pop(); jumps.push([start, start + slice.length - 1]); events.splice(start, 2, slice); } jumps.reverse(); index = -1; while (++index < jumps.length) { gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]; adjust += jumps[index][1] - jumps[index][0] - 1; } return gaps; } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** * No name because it must not be turned off. * @type {Construct} */ const content = { tokenize: tokenizeContent, resolve: resolveContent }; /** @type {Construct} */ const continuationConstruct = { tokenize: tokenizeContinuation, partial: true }; /** * Content is transparent: it’s parsed right now. That way, definitions are also * parsed right now: before text in paragraphs (specifically, media) are parsed. * * @type {Resolver} */ function resolveContent(events) { subtokenize(events); return events; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeContent(effects, ok) { /** @type {Token | undefined} */ let previous; return chunkStart; /** * Before a content chunk. * * ```markdown * > | abc * ^ * ``` * * @type {State} */ function chunkStart(code) { effects.enter("content"); previous = effects.enter("chunkContent", { contentType: "content" }); return chunkInside(code); } /** * In a content chunk. * * ```markdown * > | abc * ^^^ * ``` * * @type {State} */ function chunkInside(code) { if (code === null) { return contentEnd(code); } // To do: in `markdown-rs`, each line is parsed on its own, and everything // is stitched together resolving. if (markdownLineEnding(code)) { return effects.check(continuationConstruct, contentContinue, contentEnd)(code); } // Data. 
effects.consume(code); return chunkInside; } /** * * * @type {State} */ function contentEnd(code) { effects.exit("chunkContent"); effects.exit("content"); return ok(code); } /** * * * @type {State} */ function contentContinue(code) { effects.consume(code); effects.exit("chunkContent"); previous.next = effects.enter("chunkContent", { contentType: "content", previous }); previous = previous.next; return chunkInside; } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeContinuation(effects, ok, nok) { const self = this; return startLookahead; /** * * * @type {State} */ function startLookahead(code) { effects.exit("chunkContent"); effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return factorySpace(effects, prefixed, "linePrefix"); } /** * * * @type {State} */ function prefixed(code) { if (code === null || markdownLineEnding(code)) { return nok(code); } // Always populated by defaults. const tail = self.events[self.events.length - 1]; if (!self.parser.constructs.disable.null.includes('codeIndented') && tail && tail[1].type === "linePrefix" && tail[2].sliceSerialize(tail[1], true).length >= 4) { return ok(code); } return effects.interrupt(self.parser.constructs.flow, nok, ok)(code); } } /** * @typedef {import('micromark-util-types').Effects} Effects * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenType} TokenType */ /** * Parse destinations. * * ###### Examples * * ```markdown * * b> * * * a * a\)b * a(b)c * a(b) * ``` * * @param {Effects} effects * Context. * @param {State} ok * State switched to when successful. * @param {State} nok * State switched to when unsuccessful. * @param {TokenType} type * Type for whole (`` or `b`). * @param {TokenType} literalType * Type when enclosed (``). * @param {TokenType} literalMarkerType * Type for enclosing (`<` and `>`). * @param {TokenType} rawType * Type when not enclosed (`b`). * @param {TokenType} stringType * Type for the value (`a` or `b`). * @param {number | undefined} [max=Infinity] * Depth of nested parens (inclusive). * @returns {State} * Start state. */ // eslint-disable-next-line max-params function factoryDestination( effects, ok, nok, type, literalType, literalMarkerType, rawType, stringType, max ) { const limit = max || Number.POSITIVE_INFINITY; let balance = 0; return start /** * Start of destination. * * ```markdown * > | * ^ * > | aa * ^ * ``` * * @type {State} */ function start(code) { if (code === 60) { effects.enter(type); effects.enter(literalType); effects.enter(literalMarkerType); effects.consume(code); effects.exit(literalMarkerType); return enclosedBefore } // ASCII control, space, closing paren. if (code === null || code === 32 || code === 41 || asciiControl(code)) { return nok(code) } effects.enter(type); effects.enter(rawType); effects.enter(stringType); effects.enter('chunkString', { contentType: 'string' }); return raw(code) } /** * After `<`, at an enclosed destination. * * ```markdown * > | * ^ * ``` * * @type {State} */ function enclosedBefore(code) { if (code === 62) { effects.enter(literalMarkerType); effects.consume(code); effects.exit(literalMarkerType); effects.exit(literalType); effects.exit(type); return ok } effects.enter(stringType); effects.enter('chunkString', { contentType: 'string' }); return enclosed(code) } /** * In enclosed destination. 
* * ```markdown * > | * ^ * ``` * * @type {State} */ function enclosed(code) { if (code === 62) { effects.exit('chunkString'); effects.exit(stringType); return enclosedBefore(code) } if (code === null || code === 60 || markdownLineEnding(code)) { return nok(code) } effects.consume(code); return code === 92 ? enclosedEscape : enclosed } /** * After `\`, at a special character. * * ```markdown * > | * ^ * ``` * * @type {State} */ function enclosedEscape(code) { if (code === 60 || code === 62 || code === 92) { effects.consume(code); return enclosed } return enclosed(code) } /** * In raw destination. * * ```markdown * > | aa * ^ * ``` * * @type {State} */ function raw(code) { if ( !balance && (code === null || code === 41 || markdownLineEndingOrSpace(code)) ) { effects.exit('chunkString'); effects.exit(stringType); effects.exit(rawType); effects.exit(type); return ok(code) } if (balance < limit && code === 40) { effects.consume(code); balance++; return raw } if (code === 41) { effects.consume(code); balance--; return raw } // ASCII control (but *not* `\0`) and space and `(`. // Note: in `markdown-rs`, `\0` exists in codes, in `micromark-js` it // doesn’t. if (code === null || code === 32 || code === 40 || asciiControl(code)) { return nok(code) } effects.consume(code); return code === 92 ? rawEscape : raw } /** * After `\`, at special character. * * ```markdown * > | a\*a * ^ * ``` * * @type {State} */ function rawEscape(code) { if (code === 40 || code === 41 || code === 92) { effects.consume(code); return raw } return raw(code) } } /** * @typedef {import('micromark-util-types').Effects} Effects * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').TokenType} TokenType */ /** * Parse labels. * * > 👉 **Note**: labels in markdown are capped at 999 characters in the string. * * ###### Examples * * ```markdown * [a] * [a * b] * [a\]b] * ``` * * @this {TokenizeContext} * Tokenize context. * @param {Effects} effects * Context. * @param {State} ok * State switched to when successful. * @param {State} nok * State switched to when unsuccessful. * @param {TokenType} type * Type of the whole label (`[a]`). * @param {TokenType} markerType * Type for the markers (`[` and `]`). * @param {TokenType} stringType * Type for the identifier (`a`). * @returns {State} * Start state. */ // eslint-disable-next-line max-params function factoryLabel(effects, ok, nok, type, markerType, stringType) { const self = this; let size = 0; /** @type {boolean} */ let seen; return start /** * Start of label. * * ```markdown * > | [a] * ^ * ``` * * @type {State} */ function start(code) { effects.enter(type); effects.enter(markerType); effects.consume(code); effects.exit(markerType); effects.enter(stringType); return atBreak } /** * In label, at something, before something else. * * ```markdown * > | [a] * ^ * ``` * * @type {State} */ function atBreak(code) { if ( size > 999 || code === null || code === 91 || (code === 93 && !seen) || // To do: remove in the future once we’ve switched from // `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, // which doesn’t need this. // Hidden footnotes hook. /* c8 ignore next 3 */ (code === 94 && !size && '_hiddenFootnoteSupport' in self.parser.constructs) ) { return nok(code) } if (code === 93) { effects.exit(stringType); effects.enter(markerType); effects.consume(code); effects.exit(markerType); effects.exit(type); return ok } // To do: indent? 
Link chunks and EOLs together? if (markdownLineEnding(code)) { effects.enter('lineEnding'); effects.consume(code); effects.exit('lineEnding'); return atBreak } effects.enter('chunkString', { contentType: 'string' }); return labelInside(code) } /** * In label, in text. * * ```markdown * > | [a] * ^ * ``` * * @type {State} */ function labelInside(code) { if ( code === null || code === 91 || code === 93 || markdownLineEnding(code) || size++ > 999 ) { effects.exit('chunkString'); return atBreak(code) } effects.consume(code); if (!seen) seen = !markdownSpace(code); return code === 92 ? labelEscape : labelInside } /** * After `\`, at a special character. * * ```markdown * > | [a\*a] * ^ * ``` * * @type {State} */ function labelEscape(code) { if (code === 91 || code === 92 || code === 93) { effects.consume(code); size++; return labelInside } return labelInside(code) } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Effects} Effects * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenType} TokenType */ /** * Parse titles. * * ###### Examples * * ```markdown * "a" * 'b' * (c) * "a * b" * 'a * b' * (a\)b) * ``` * * @param {Effects} effects * Context. * @param {State} ok * State switched to when successful. * @param {State} nok * State switched to when unsuccessful. * @param {TokenType} type * Type of the whole title (`"a"`, `'b'`, `(c)`). * @param {TokenType} markerType * Type for the markers (`"`, `'`, `(`, and `)`). * @param {TokenType} stringType * Type for the value (`a`). * @returns {State} * Start state. */ // eslint-disable-next-line max-params function factoryTitle(effects, ok, nok, type, markerType, stringType) { /** @type {NonNullable} */ let marker; return start /** * Start of title. * * ```markdown * > | "a" * ^ * ``` * * @type {State} */ function start(code) { if (code === 34 || code === 39 || code === 40) { effects.enter(type); effects.enter(markerType); effects.consume(code); effects.exit(markerType); marker = code === 40 ? 41 : code; return begin } return nok(code) } /** * After opening marker. * * This is also used at the closing marker. * * ```markdown * > | "a" * ^ * ``` * * @type {State} */ function begin(code) { if (code === marker) { effects.enter(markerType); effects.consume(code); effects.exit(markerType); effects.exit(type); return ok } effects.enter(stringType); return atBreak(code) } /** * At something, before something else. * * ```markdown * > | "a" * ^ * ``` * * @type {State} */ function atBreak(code) { if (code === marker) { effects.exit(stringType); return begin(marker) } if (code === null) { return nok(code) } // Note: blank lines can’t exist in content. if (markdownLineEnding(code)) { // To do: use `space_or_tab_eol_with_options`, connect. effects.enter('lineEnding'); effects.consume(code); effects.exit('lineEnding'); return factorySpace(effects, atBreak, 'linePrefix') } effects.enter('chunkString', { contentType: 'string' }); return inside(code) } /** * * * @type {State} */ function inside(code) { if (code === marker || code === null || markdownLineEnding(code)) { effects.exit('chunkString'); return atBreak(code) } effects.consume(code); return code === 92 ? escape : inside } /** * After `\`, at a special character. 
* * ```markdown * > | "a\*b" * ^ * ``` * * @type {State} */ function escape(code) { if (code === marker || code === 92) { effects.consume(code); return inside } return inside(code) } } /** * @typedef {import('micromark-util-types').Effects} Effects * @typedef {import('micromark-util-types').State} State */ /** * Parse spaces and tabs. * * There is no `nok` parameter: * * * line endings or spaces in markdown are often optional, in which case this * factory can be used and `ok` will be switched to whether spaces were found * or not * * one line ending or space can be detected with * `markdownLineEndingOrSpace(code)` right before using `factoryWhitespace` * * @param {Effects} effects * Context. * @param {State} ok * State switched to when successful. * @returns {State} * Start state. */ function factoryWhitespace(effects, ok) { /** @type {boolean} */ let seen; return start /** @type {State} */ function start(code) { if (markdownLineEnding(code)) { effects.enter('lineEnding'); effects.consume(code); effects.exit('lineEnding'); seen = true; return start } if (markdownSpace(code)) { return factorySpace( effects, start, seen ? 'linePrefix' : 'lineSuffix' )(code) } return ok(code) } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const definition$1 = { name: 'definition', tokenize: tokenizeDefinition }; /** @type {Construct} */ const titleBefore = { tokenize: tokenizeTitleBefore, partial: true }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeDefinition(effects, ok, nok) { const self = this; /** @type {string} */ let identifier; return start; /** * At start of a definition. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function start(code) { // Do not interrupt paragraphs (but do follow definitions). // To do: do `interrupt` the way `markdown-rs` does. // To do: parse whitespace the way `markdown-rs` does. effects.enter("definition"); return before(code); } /** * After optional whitespace, at `[`. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function before(code) { // To do: parse whitespace the way `markdown-rs` does. return factoryLabel.call(self, effects, labelAfter, // Note: we don’t need to reset the way `markdown-rs` does. nok, "definitionLabel", "definitionLabelMarker", "definitionLabelString")(code); } /** * After label. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function labelAfter(code) { identifier = normalizeIdentifier(self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1)); if (code === 58) { effects.enter("definitionMarker"); effects.consume(code); effects.exit("definitionMarker"); return markerAfter; } return nok(code); } /** * After marker. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function markerAfter(code) { // Note: whitespace is optional. return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, destinationBefore)(code) : destinationBefore(code); } /** * Before destination. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function destinationBefore(code) { return factoryDestination(effects, destinationAfter, // Note: we don’t need to reset the way `markdown-rs` does. 
nok, "definitionDestination", "definitionDestinationLiteral", "definitionDestinationLiteralMarker", "definitionDestinationRaw", "definitionDestinationString")(code); } /** * After destination. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function destinationAfter(code) { return effects.attempt(titleBefore, after, after)(code); } /** * After definition. * * ```markdown * > | [a]: b * ^ * > | [a]: b "c" * ^ * ``` * * @type {State} */ function after(code) { return markdownSpace(code) ? factorySpace(effects, afterWhitespace, "whitespace")(code) : afterWhitespace(code); } /** * After definition, after optional whitespace. * * ```markdown * > | [a]: b * ^ * > | [a]: b "c" * ^ * ``` * * @type {State} */ function afterWhitespace(code) { if (code === null || markdownLineEnding(code)) { effects.exit("definition"); // Note: we don’t care about uniqueness. // It’s likely that that doesn’t happen very frequently. // It is more likely that it wastes precious time. self.parser.defined.push(identifier); // To do: `markdown-rs` interrupt. // // You’d be interrupting. // tokenizer.interrupt = true return ok(code); } return nok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeTitleBefore(effects, ok, nok) { return titleBefore; /** * After destination, at whitespace. * * ```markdown * > | [a]: b * ^ * > | [a]: b "c" * ^ * ``` * * @type {State} */ function titleBefore(code) { return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, beforeMarker)(code) : nok(code); } /** * At title. * * ```markdown * | [a]: b * > | "c" * ^ * ``` * * @type {State} */ function beforeMarker(code) { return factoryTitle(effects, titleAfter, nok, "definitionTitle", "definitionTitleMarker", "definitionTitleString")(code); } /** * After title. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function titleAfter(code) { return markdownSpace(code) ? factorySpace(effects, titleAfterOptionalWhitespace, "whitespace")(code) : titleAfterOptionalWhitespace(code); } /** * After title, after optional whitespace. * * ```markdown * > | [a]: b "c" * ^ * ``` * * @type {State} */ function titleAfterOptionalWhitespace(code) { return code === null || markdownLineEnding(code) ? ok(code) : nok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const hardBreakEscape = { name: 'hardBreakEscape', tokenize: tokenizeHardBreakEscape }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeHardBreakEscape(effects, ok, nok) { return start; /** * Start of a hard break (escape). * * ```markdown * > | a\ * ^ * | b * ``` * * @type {State} */ function start(code) { effects.enter("hardBreakEscape"); effects.consume(code); return after; } /** * After `\`, at eol. 
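 *
 * In other words, a backslash right before a line ending is a hard break.
 * A minimal standalone sketch (using the separate `micromark` package this
 * bundle vendors; expected output per CommonMark):
 *
 * ```js
 * import {micromark} from 'micromark'
 *
 * // The `\` before the line ending becomes `<br />`.
 * micromark('a\\\nb') // => '<p>a<br />\nb</p>'
 * ```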
* * ```markdown * > | a\ * ^ * | b * ``` * * @type {State} */ function after(code) { if (markdownLineEnding(code)) { effects.exit("hardBreakEscape"); return ok(code); } return nok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const headingAtx = { name: 'headingAtx', tokenize: tokenizeHeadingAtx, resolve: resolveHeadingAtx }; /** @type {Resolver} */ function resolveHeadingAtx(events, context) { let contentEnd = events.length - 2; let contentStart = 3; /** @type {Token} */ let content; /** @type {Token} */ let text; // Prefix whitespace, part of the opening. if (events[contentStart][1].type === "whitespace") { contentStart += 2; } // Suffix whitespace, part of the closing. if (contentEnd - 2 > contentStart && events[contentEnd][1].type === "whitespace") { contentEnd -= 2; } if (events[contentEnd][1].type === "atxHeadingSequence" && (contentStart === contentEnd - 1 || contentEnd - 4 > contentStart && events[contentEnd - 2][1].type === "whitespace")) { contentEnd -= contentStart + 1 === contentEnd ? 2 : 4; } if (contentEnd > contentStart) { content = { type: "atxHeadingText", start: events[contentStart][1].start, end: events[contentEnd][1].end }; text = { type: "chunkText", start: events[contentStart][1].start, end: events[contentEnd][1].end, contentType: "text" }; splice$1(events, contentStart, contentEnd - contentStart + 1, [['enter', content, context], ['enter', text, context], ['exit', text, context], ['exit', content, context]]); } return events; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeHeadingAtx(effects, ok, nok) { let size = 0; return start; /** * Start of a heading (atx). * * ```markdown * > | ## aa * ^ * ``` * * @type {State} */ function start(code) { // To do: parse indent like `markdown-rs`. effects.enter("atxHeading"); return before(code); } /** * After optional whitespace, at `#`. * * ```markdown * > | ## aa * ^ * ``` * * @type {State} */ function before(code) { effects.enter("atxHeadingSequence"); return sequenceOpen(code); } /** * In opening sequence. * * ```markdown * > | ## aa * ^ * ``` * * @type {State} */ function sequenceOpen(code) { if (code === 35 && size++ < 6) { effects.consume(code); return sequenceOpen; } // Always at least one `#`. if (code === null || markdownLineEndingOrSpace(code)) { effects.exit("atxHeadingSequence"); return atBreak(code); } return nok(code); } /** * After something, before something else. * * ```markdown * > | ## aa * ^ * ``` * * @type {State} */ function atBreak(code) { if (code === 35) { effects.enter("atxHeadingSequence"); return sequenceFurther(code); } if (code === null || markdownLineEnding(code)) { effects.exit("atxHeading"); // To do: interrupt like `markdown-rs`. // // Feel free to interrupt. // tokenizer.interrupt = false return ok(code); } if (markdownSpace(code)) { return factorySpace(effects, atBreak, "whitespace")(code); } // To do: generate `data` tokens, add the `text` token later. // Needs edit map, see: `markdown.rs`. effects.enter("atxHeadingText"); return data(code); } /** * In further sequence (after whitespace). * * Could be normal “visible” hashes in the heading or a final sequence. 
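 *
 * A trailing run of `#`, preceded by whitespace and followed by nothing else,
 * closes the heading and is dropped from its text; otherwise the hashes are
 * literal. A small standalone sketch (using the separate `micromark` package
 * this bundle vendors; expected output per CommonMark):
 *
 * ```js
 * import {micromark} from 'micromark'
 *
 * micromark('## aa ##') // => '<h2>aa</h2>' (closing sequence dropped)
 * micromark('## aa #b') // => '<h2>aa #b</h2>' (not a closing sequence)
 * ```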
* * ```markdown * > | ## aa ## * ^ * ``` * * @type {State} */ function sequenceFurther(code) { if (code === 35) { effects.consume(code); return sequenceFurther; } effects.exit("atxHeadingSequence"); return atBreak(code); } /** * In text. * * ```markdown * > | ## aa * ^ * ``` * * @type {State} */ function data(code) { if (code === null || code === 35 || markdownLineEndingOrSpace(code)) { effects.exit("atxHeadingText"); return atBreak(code); } effects.consume(code); return data; } } /** * List of lowercase HTML “block” tag names. * * The list, when parsing HTML (flow), results in more relaxed rules (condition * 6). * Because they are known blocks, the HTML-like syntax doesn’t have to be * strictly parsed. * For tag names not in this list, a more strict algorithm (condition 7) is used * to detect whether the HTML-like syntax is seen as HTML (flow) or not. * * This is copied from: * . * * > 👉 **Note**: `search` was added in `CommonMark@0.31`. */ const htmlBlockNames = [ 'address', 'article', 'aside', 'base', 'basefont', 'blockquote', 'body', 'caption', 'center', 'col', 'colgroup', 'dd', 'details', 'dialog', 'dir', 'div', 'dl', 'dt', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'header', 'hr', 'html', 'iframe', 'legend', 'li', 'link', 'main', 'menu', 'menuitem', 'nav', 'noframes', 'ol', 'optgroup', 'option', 'p', 'param', 'search', 'section', 'summary', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'title', 'tr', 'track', 'ul' ]; /** * List of lowercase HTML “raw” tag names. * * The list, when parsing HTML (flow), results in HTML that can include lines * without exiting, until a closing tag also in this list is found (condition * 1). * * This module is copied from: * . * * > 👉 **Note**: `textarea` was added in `CommonMark@0.30`. */ const htmlRawNames = ['pre', 'script', 'style', 'textarea']; /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const htmlFlow = { name: 'htmlFlow', tokenize: tokenizeHtmlFlow, resolveTo: resolveToHtmlFlow, concrete: true }; /** @type {Construct} */ const blankLineBefore = { tokenize: tokenizeBlankLineBefore, partial: true }; const nonLazyContinuationStart = { tokenize: tokenizeNonLazyContinuationStart, partial: true }; /** @type {Resolver} */ function resolveToHtmlFlow(events) { let index = events.length; while (index--) { if (events[index][0] === 'enter' && events[index][1].type === "htmlFlow") { break; } } if (index > 1 && events[index - 2][1].type === "linePrefix") { // Add the prefix start to the HTML token. events[index][1].start = events[index - 2][1].start; // Add the prefix start to the HTML line token. events[index + 1][1].start = events[index - 2][1].start; // Remove the line prefix. events.splice(index - 2, 2); } return events; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeHtmlFlow(effects, ok, nok) { const self = this; /** @type {number} */ let marker; /** @type {boolean} */ let closingTag; /** @type {string} */ let buffer; /** @type {number} */ let index; /** @type {Code} */ let markerB; return start; /** * Start of HTML (flow). 
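 *
 * HTML (flow) is line-based: once it starts, lines are passed through until
 * the construct's end condition. A rough standalone sketch (using the
 * separate `micromark` package this bundle vendors; its compiler escapes HTML
 * by default, so the example opts in with `allowDangerousHtml`):
 *
 * ```js
 * import {micromark} from 'micromark'
 *
 * // A basic (condition 6) block: the `*hi*` line is not parsed as markdown.
 * micromark('<div>\n*hi*\n</div>', {allowDangerousHtml: true})
 * // => '<div>\n*hi*\n</div>'
 * ```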
* * ```markdown * > | * ^ * ``` * * @type {State} */ function start(code) { // To do: parse indent like `markdown-rs`. return before(code); } /** * At `<`, after optional whitespace. * * ```markdown * > | * ^ * ``` * * @type {State} */ function before(code) { effects.enter("htmlFlow"); effects.enter("htmlFlowData"); effects.consume(code); return open; } /** * After `<`, at tag name or other stuff. * * ```markdown * > | * ^ * > | * ^ * > | * ^ * ``` * * @type {State} */ function open(code) { if (code === 33) { effects.consume(code); return declarationOpen; } if (code === 47) { effects.consume(code); closingTag = true; return tagCloseStart; } if (code === 63) { effects.consume(code); marker = 3; // To do: // tokenizer.concrete = true // To do: use `markdown-rs` style interrupt. // While we’re in an instruction instead of a declaration, we’re on a `?` // right now, so we do need to search for `>`, similar to declarations. return self.interrupt ? ok : continuationDeclarationInside; } // ASCII alphabetical. if (asciiAlpha(code)) { effects.consume(code); // @ts-expect-error: not null. buffer = String.fromCharCode(code); return tagName; } return nok(code); } /** * After ` | * ^ * > | * ^ * > | &<]]> * ^ * ``` * * @type {State} */ function declarationOpen(code) { if (code === 45) { effects.consume(code); marker = 2; return commentOpenInside; } if (code === 91) { effects.consume(code); marker = 5; index = 0; return cdataOpenInside; } // ASCII alphabetical. if (asciiAlpha(code)) { effects.consume(code); marker = 4; // // Do not form containers. // tokenizer.concrete = true return self.interrupt ? ok : continuationDeclarationInside; } return nok(code); } /** * After ` | * ^ * ``` * * @type {State} */ function commentOpenInside(code) { if (code === 45) { effects.consume(code); // // Do not form containers. // tokenizer.concrete = true return self.interrupt ? ok : continuationDeclarationInside; } return nok(code); } /** * After ` | &<]]> * ^^^^^^ * ``` * * @type {State} */ function cdataOpenInside(code) { const value = "CDATA["; if (code === value.charCodeAt(index++)) { effects.consume(code); if (index === value.length) { // // Do not form containers. // tokenizer.concrete = true return self.interrupt ? ok : continuation; } return cdataOpenInside; } return nok(code); } /** * After ` | * ^ * ``` * * @type {State} */ function tagCloseStart(code) { if (asciiAlpha(code)) { effects.consume(code); // @ts-expect-error: not null. buffer = String.fromCharCode(code); return tagName; } return nok(code); } /** * In tag name. * * ```markdown * > | * ^^ * > | * ^^ * ``` * * @type {State} */ function tagName(code) { if (code === null || code === 47 || code === 62 || markdownLineEndingOrSpace(code)) { const slash = code === 47; const name = buffer.toLowerCase(); if (!slash && !closingTag && htmlRawNames.includes(name)) { marker = 1; // // Do not form containers. // tokenizer.concrete = true return self.interrupt ? ok(code) : continuation(code); } if (htmlBlockNames.includes(buffer.toLowerCase())) { marker = 6; if (slash) { effects.consume(code); return basicSelfClosing; } // // Do not form containers. // tokenizer.concrete = true return self.interrupt ? ok(code) : continuation(code); } marker = 7; // Do not support complete HTML when interrupting. return self.interrupt && !self.parser.lazy[self.now().line] ? nok(code) : closingTag ? completeClosingTagAfter(code) : completeAttributeNameBefore(code); } // ASCII alphanumerical and `-`. 
if (code === 45 || asciiAlphanumeric(code)) { effects.consume(code); buffer += String.fromCharCode(code); return tagName; } return nok(code); } /** * After closing slash of a basic tag name. * * ```markdown * > |
* ^ * ``` * * @type {State} */ function basicSelfClosing(code) { if (code === 62) { effects.consume(code); // // Do not form containers. // tokenizer.concrete = true return self.interrupt ? ok : continuation; } return nok(code); } /** * After closing slash of a complete tag name. * * ```markdown * > | * ^ * ``` * * @type {State} */ function completeClosingTagAfter(code) { if (markdownSpace(code)) { effects.consume(code); return completeClosingTagAfter; } return completeEnd(code); } /** * At an attribute name. * * At first, this state is used after a complete tag name, after whitespace, * where it expects optional attributes or the end of the tag. * It is also reused after attributes, when expecting more optional * attributes. * * ```markdown * > | * ^ * > | * ^ * > | * ^ * > | * ^ * > | * ^ * ``` * * @type {State} */ function completeAttributeNameBefore(code) { if (code === 47) { effects.consume(code); return completeEnd; } // ASCII alphanumerical and `:` and `_`. if (code === 58 || code === 95 || asciiAlpha(code)) { effects.consume(code); return completeAttributeName; } if (markdownSpace(code)) { effects.consume(code); return completeAttributeNameBefore; } return completeEnd(code); } /** * In attribute name. * * ```markdown * > | * ^ * > | * ^ * > | * ^ * ``` * * @type {State} */ function completeAttributeName(code) { // ASCII alphanumerical and `-`, `.`, `:`, and `_`. if (code === 45 || code === 46 || code === 58 || code === 95 || asciiAlphanumeric(code)) { effects.consume(code); return completeAttributeName; } return completeAttributeNameAfter(code); } /** * After attribute name, at an optional initializer, the end of the tag, or * whitespace. * * ```markdown * > | * ^ * > | * ^ * ``` * * @type {State} */ function completeAttributeNameAfter(code) { if (code === 61) { effects.consume(code); return completeAttributeValueBefore; } if (markdownSpace(code)) { effects.consume(code); return completeAttributeNameAfter; } return completeAttributeNameBefore(code); } /** * Before unquoted, double quoted, or single quoted attribute value, allowing * whitespace. * * ```markdown * > | * ^ * > | * ^ * ``` * * @type {State} */ function completeAttributeValueBefore(code) { if (code === null || code === 60 || code === 61 || code === 62 || code === 96) { return nok(code); } if (code === 34 || code === 39) { effects.consume(code); markerB = code; return completeAttributeValueQuoted; } if (markdownSpace(code)) { effects.consume(code); return completeAttributeValueBefore; } return completeAttributeValueUnquoted(code); } /** * In double or single quoted attribute value. * * ```markdown * > | * ^ * > | * ^ * ``` * * @type {State} */ function completeAttributeValueQuoted(code) { if (code === markerB) { effects.consume(code); markerB = null; return completeAttributeValueQuotedAfter; } if (code === null || markdownLineEnding(code)) { return nok(code); } effects.consume(code); return completeAttributeValueQuoted; } /** * In unquoted attribute value. * * ```markdown * > | * ^ * ``` * * @type {State} */ function completeAttributeValueUnquoted(code) { if (code === null || code === 34 || code === 39 || code === 47 || code === 60 || code === 61 || code === 62 || code === 96 || markdownLineEndingOrSpace(code)) { return completeAttributeNameAfter(code); } effects.consume(code); return completeAttributeValueUnquoted; } /** * After double or single quoted attribute value, before whitespace or the * end of the tag. 
* * ```markdown * > | * ^ * ``` * * @type {State} */ function completeAttributeValueQuotedAfter(code) { if (code === 47 || code === 62 || markdownSpace(code)) { return completeAttributeNameBefore(code); } return nok(code); } /** * In certain circumstances of a complete tag where only an `>` is allowed. * * ```markdown * > | * ^ * ``` * * @type {State} */ function completeEnd(code) { if (code === 62) { effects.consume(code); return completeAfter; } return nok(code); } /** * After `>` in a complete tag. * * ```markdown * > | * ^ * ``` * * @type {State} */ function completeAfter(code) { if (code === null || markdownLineEnding(code)) { // // Do not form containers. // tokenizer.concrete = true return continuation(code); } if (markdownSpace(code)) { effects.consume(code); return completeAfter; } return nok(code); } /** * In continuation of any HTML kind. * * ```markdown * > | * ^ * ``` * * @type {State} */ function continuation(code) { if (code === 45 && marker === 2) { effects.consume(code); return continuationCommentInside; } if (code === 60 && marker === 1) { effects.consume(code); return continuationRawTagOpen; } if (code === 62 && marker === 4) { effects.consume(code); return continuationClose; } if (code === 63 && marker === 3) { effects.consume(code); return continuationDeclarationInside; } if (code === 93 && marker === 5) { effects.consume(code); return continuationCdataInside; } if (markdownLineEnding(code) && (marker === 6 || marker === 7)) { effects.exit("htmlFlowData"); return effects.check(blankLineBefore, continuationAfter, continuationStart)(code); } if (code === null || markdownLineEnding(code)) { effects.exit("htmlFlowData"); return continuationStart(code); } effects.consume(code); return continuation; } /** * In continuation, at eol. * * ```markdown * > | * ^ * | asd * ``` * * @type {State} */ function continuationStart(code) { return effects.check(nonLazyContinuationStart, continuationStartNonLazy, continuationAfter)(code); } /** * In continuation, at eol, before non-lazy content. * * ```markdown * > | * ^ * | asd * ``` * * @type {State} */ function continuationStartNonLazy(code) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return continuationBefore; } /** * In continuation, before non-lazy content. * * ```markdown * | * > | asd * ^ * ``` * * @type {State} */ function continuationBefore(code) { if (code === null || markdownLineEnding(code)) { return continuationStart(code); } effects.enter("htmlFlowData"); return continuation(code); } /** * In comment continuation, after one `-`, expecting another. * * ```markdown * > | * ^ * ``` * * @type {State} */ function continuationCommentInside(code) { if (code === 45) { effects.consume(code); return continuationDeclarationInside; } return continuation(code); } /** * In raw continuation, after `<`, at `/`. * * ```markdown * > | * ^ * ``` * * @type {State} */ function continuationRawTagOpen(code) { if (code === 47) { effects.consume(code); buffer = ''; return continuationRawEndTag; } return continuation(code); } /** * In raw continuation, after ` | * ^^^^^^ * ``` * * @type {State} */ function continuationRawEndTag(code) { if (code === 62) { const name = buffer.toLowerCase(); if (htmlRawNames.includes(name)) { effects.consume(code); return continuationClose; } return continuation(code); } if (asciiAlpha(code) && buffer.length < 8) { effects.consume(code); // @ts-expect-error: not null. 
buffer += String.fromCharCode(code); return continuationRawEndTag; } return continuation(code); } /** * In cdata continuation, after `]`, expecting `]>`. * * ```markdown * > | &<]]> * ^ * ``` * * @type {State} */ function continuationCdataInside(code) { if (code === 93) { effects.consume(code); return continuationDeclarationInside; } return continuation(code); } /** * In declaration or instruction continuation, at `>`. * * ```markdown * > | * ^ * > | * ^ * > | * ^ * > | * ^ * > | &<]]> * ^ * ``` * * @type {State} */ function continuationDeclarationInside(code) { if (code === 62) { effects.consume(code); return continuationClose; } // More dashes. if (code === 45 && marker === 2) { effects.consume(code); return continuationDeclarationInside; } return continuation(code); } /** * In closed continuation: everything we get until the eol/eof is part of it. * * ```markdown * > | * ^ * ``` * * @type {State} */ function continuationClose(code) { if (code === null || markdownLineEnding(code)) { effects.exit("htmlFlowData"); return continuationAfter(code); } effects.consume(code); return continuationClose; } /** * Done. * * ```markdown * > | * ^ * ``` * * @type {State} */ function continuationAfter(code) { effects.exit("htmlFlow"); // // Feel free to interrupt. // tokenizer.interrupt = false // // No longer concrete. // tokenizer.concrete = false return ok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeNonLazyContinuationStart(effects, ok, nok) { const self = this; return start; /** * At eol, before continuation. * * ```markdown * > | * ```js * ^ * | b * ``` * * @type {State} */ function start(code) { if (markdownLineEnding(code)) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return after; } return nok(code); } /** * A continuation. * * ```markdown * | * ```js * > | b * ^ * ``` * * @type {State} */ function after(code) { return self.parser.lazy[self.now().line] ? nok(code) : ok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeBlankLineBefore(effects, ok, nok) { return start; /** * Before eol, expecting blank line. * * ```markdown * > |
* ^ * | * ``` * * @type {State} */ function start(code) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return effects.attempt(blankLine, ok, nok); } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const htmlText = { name: 'htmlText', tokenize: tokenizeHtmlText }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeHtmlText(effects, ok, nok) { const self = this; /** @type {NonNullable | undefined} */ let marker; /** @type {number} */ let index; /** @type {State} */ let returnState; return start; /** * Start of HTML (text). * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function start(code) { effects.enter("htmlText"); effects.enter("htmlTextData"); effects.consume(code); return open; } /** * After `<`, at tag name or other stuff. * * ```markdown * > | a c * ^ * > | a c * ^ * > | a c * ^ * ``` * * @type {State} */ function open(code) { if (code === 33) { effects.consume(code); return declarationOpen; } if (code === 47) { effects.consume(code); return tagCloseStart; } if (code === 63) { effects.consume(code); return instruction; } // ASCII alphabetical. if (asciiAlpha(code)) { effects.consume(code); return tagOpen; } return nok(code); } /** * After ` | a c * ^ * > | a c * ^ * > | a &<]]> c * ^ * ``` * * @type {State} */ function declarationOpen(code) { if (code === 45) { effects.consume(code); return commentOpenInside; } if (code === 91) { effects.consume(code); index = 0; return cdataOpenInside; } if (asciiAlpha(code)) { effects.consume(code); return declaration; } return nok(code); } /** * In a comment, after ` | a c * ^ * ``` * * @type {State} */ function commentOpenInside(code) { if (code === 45) { effects.consume(code); return commentEnd; } return nok(code); } /** * In comment. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function comment(code) { if (code === null) { return nok(code); } if (code === 45) { effects.consume(code); return commentClose; } if (markdownLineEnding(code)) { returnState = comment; return lineEndingBefore(code); } effects.consume(code); return comment; } /** * In comment, after `-`. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function commentClose(code) { if (code === 45) { effects.consume(code); return commentEnd; } return comment(code); } /** * In comment, after `--`. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function commentEnd(code) { return code === 62 ? end(code) : code === 45 ? commentClose(code) : comment(code); } /** * After ` | a &<]]> b * ^^^^^^ * ``` * * @type {State} */ function cdataOpenInside(code) { const value = "CDATA["; if (code === value.charCodeAt(index++)) { effects.consume(code); return index === value.length ? cdata : cdataOpenInside; } return nok(code); } /** * In CDATA. * * ```markdown * > | a &<]]> b * ^^^ * ``` * * @type {State} */ function cdata(code) { if (code === null) { return nok(code); } if (code === 93) { effects.consume(code); return cdataClose; } if (markdownLineEnding(code)) { returnState = cdata; return lineEndingBefore(code); } effects.consume(code); return cdata; } /** * In CDATA, after `]`, at another `]`. 
* * ```markdown * > | a &<]]> b * ^ * ``` * * @type {State} */ function cdataClose(code) { if (code === 93) { effects.consume(code); return cdataEnd; } return cdata(code); } /** * In CDATA, after `]]`, at `>`. * * ```markdown * > | a &<]]> b * ^ * ``` * * @type {State} */ function cdataEnd(code) { if (code === 62) { return end(code); } if (code === 93) { effects.consume(code); return cdataEnd; } return cdata(code); } /** * In declaration. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function declaration(code) { if (code === null || code === 62) { return end(code); } if (markdownLineEnding(code)) { returnState = declaration; return lineEndingBefore(code); } effects.consume(code); return declaration; } /** * In instruction. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function instruction(code) { if (code === null) { return nok(code); } if (code === 63) { effects.consume(code); return instructionClose; } if (markdownLineEnding(code)) { returnState = instruction; return lineEndingBefore(code); } effects.consume(code); return instruction; } /** * In instruction, after `?`, at `>`. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function instructionClose(code) { return code === 62 ? end(code) : instruction(code); } /** * After ` | a c * ^ * ``` * * @type {State} */ function tagCloseStart(code) { // ASCII alphabetical. if (asciiAlpha(code)) { effects.consume(code); return tagClose; } return nok(code); } /** * After ` | a c * ^ * ``` * * @type {State} */ function tagClose(code) { // ASCII alphanumerical and `-`. if (code === 45 || asciiAlphanumeric(code)) { effects.consume(code); return tagClose; } return tagCloseBetween(code); } /** * In closing tag, after tag name. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function tagCloseBetween(code) { if (markdownLineEnding(code)) { returnState = tagCloseBetween; return lineEndingBefore(code); } if (markdownSpace(code)) { effects.consume(code); return tagCloseBetween; } return end(code); } /** * After ` | a c * ^ * ``` * * @type {State} */ function tagOpen(code) { // ASCII alphanumerical and `-`. if (code === 45 || asciiAlphanumeric(code)) { effects.consume(code); return tagOpen; } if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) { return tagOpenBetween(code); } return nok(code); } /** * In opening tag, after tag name. * * ```markdown * > | a c * ^ * ``` * * @type {State} */ function tagOpenBetween(code) { if (code === 47) { effects.consume(code); return end; } // ASCII alphabetical and `:` and `_`. if (code === 58 || code === 95 || asciiAlpha(code)) { effects.consume(code); return tagOpenAttributeName; } if (markdownLineEnding(code)) { returnState = tagOpenBetween; return lineEndingBefore(code); } if (markdownSpace(code)) { effects.consume(code); return tagOpenBetween; } return end(code); } /** * In attribute name. * * ```markdown * > | a d * ^ * ``` * * @type {State} */ function tagOpenAttributeName(code) { // ASCII alphabetical and `-`, `.`, `:`, and `_`. if (code === 45 || code === 46 || code === 58 || code === 95 || asciiAlphanumeric(code)) { effects.consume(code); return tagOpenAttributeName; } return tagOpenAttributeNameAfter(code); } /** * After attribute name, before initializer, the end of the tag, or * whitespace. 
* * ```markdown * > | a d * ^ * ``` * * @type {State} */ function tagOpenAttributeNameAfter(code) { if (code === 61) { effects.consume(code); return tagOpenAttributeValueBefore; } if (markdownLineEnding(code)) { returnState = tagOpenAttributeNameAfter; return lineEndingBefore(code); } if (markdownSpace(code)) { effects.consume(code); return tagOpenAttributeNameAfter; } return tagOpenBetween(code); } /** * Before unquoted, double quoted, or single quoted attribute value, allowing * whitespace. * * ```markdown * > | a e * ^ * ``` * * @type {State} */ function tagOpenAttributeValueBefore(code) { if (code === null || code === 60 || code === 61 || code === 62 || code === 96) { return nok(code); } if (code === 34 || code === 39) { effects.consume(code); marker = code; return tagOpenAttributeValueQuoted; } if (markdownLineEnding(code)) { returnState = tagOpenAttributeValueBefore; return lineEndingBefore(code); } if (markdownSpace(code)) { effects.consume(code); return tagOpenAttributeValueBefore; } effects.consume(code); return tagOpenAttributeValueUnquoted; } /** * In double or single quoted attribute value. * * ```markdown * > | a e * ^ * ``` * * @type {State} */ function tagOpenAttributeValueQuoted(code) { if (code === marker) { effects.consume(code); marker = undefined; return tagOpenAttributeValueQuotedAfter; } if (code === null) { return nok(code); } if (markdownLineEnding(code)) { returnState = tagOpenAttributeValueQuoted; return lineEndingBefore(code); } effects.consume(code); return tagOpenAttributeValueQuoted; } /** * In unquoted attribute value. * * ```markdown * > | a e * ^ * ``` * * @type {State} */ function tagOpenAttributeValueUnquoted(code) { if (code === null || code === 34 || code === 39 || code === 60 || code === 61 || code === 96) { return nok(code); } if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) { return tagOpenBetween(code); } effects.consume(code); return tagOpenAttributeValueUnquoted; } /** * After double or single quoted attribute value, before whitespace or the end * of the tag. * * ```markdown * > | a e * ^ * ``` * * @type {State} */ function tagOpenAttributeValueQuotedAfter(code) { if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) { return tagOpenBetween(code); } return nok(code); } /** * In certain circumstances of a tag where only an `>` is allowed. * * ```markdown * > | a e * ^ * ``` * * @type {State} */ function end(code) { if (code === 62) { effects.consume(code); effects.exit("htmlTextData"); effects.exit("htmlText"); return ok; } return nok(code); } /** * At eol. * * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about * > empty tokens. * * ```markdown * > | a * ``` * * @type {State} */ function lineEndingBefore(code) { effects.exit("htmlTextData"); effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return lineEndingAfter; } /** * After eol, at optional whitespace. * * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about * > empty tokens. * * ```markdown * | a * ^ * ``` * * @type {State} */ function lineEndingAfter(code) { // Always populated by defaults. return markdownSpace(code) ? factorySpace(effects, lineEndingAfterPrefix, "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code) : lineEndingAfterPrefix(code); } /** * After eol, after optional whitespace. * * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about * > empty tokens. 
* * ```markdown * | a * ^ * ``` * * @type {State} */ function lineEndingAfterPrefix(code) { effects.enter("htmlTextData"); return returnState(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Event} Event * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const labelEnd = { name: 'labelEnd', tokenize: tokenizeLabelEnd, resolveTo: resolveToLabelEnd, resolveAll: resolveAllLabelEnd }; /** @type {Construct} */ const resourceConstruct = { tokenize: tokenizeResource }; /** @type {Construct} */ const referenceFullConstruct = { tokenize: tokenizeReferenceFull }; /** @type {Construct} */ const referenceCollapsedConstruct = { tokenize: tokenizeReferenceCollapsed }; /** @type {Resolver} */ function resolveAllLabelEnd(events) { let index = -1; while (++index < events.length) { const token = events[index][1]; if (token.type === "labelImage" || token.type === "labelLink" || token.type === "labelEnd") { // Remove the marker. events.splice(index + 1, token.type === "labelImage" ? 4 : 2); token.type = "data"; index++; } } return events; } /** @type {Resolver} */ function resolveToLabelEnd(events, context) { let index = events.length; let offset = 0; /** @type {Token} */ let token; /** @type {number | undefined} */ let open; /** @type {number | undefined} */ let close; /** @type {Array} */ let media; // Find an opening. while (index--) { token = events[index][1]; if (open) { // If we see another link, or inactive link label, we’ve been here before. if (token.type === "link" || token.type === "labelLink" && token._inactive) { break; } // Mark other link openings as inactive, as we can’t have links in // links. if (events[index][0] === 'enter' && token.type === "labelLink") { token._inactive = true; } } else if (close) { if (events[index][0] === 'enter' && (token.type === "labelImage" || token.type === "labelLink") && !token._balanced) { open = index; if (token.type !== "labelLink") { offset = 2; break; } } } else if (token.type === "labelEnd") { close = index; } } const group = { type: events[open][1].type === "labelLink" ? "link" : "image", start: Object.assign({}, events[open][1].start), end: Object.assign({}, events[events.length - 1][1].end) }; const label = { type: "label", start: Object.assign({}, events[open][1].start), end: Object.assign({}, events[close][1].end) }; const text = { type: "labelText", start: Object.assign({}, events[open + offset + 2][1].end), end: Object.assign({}, events[close - 2][1].start) }; media = [['enter', group, context], ['enter', label, context]]; // Opening marker. media = push(media, events.slice(open + 1, open + offset + 3)); // Text open. media = push(media, [['enter', text, context]]); // Always populated by defaults. // Between. media = push(media, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + offset + 4, close - 3), context)); // Text close, marker close, label close. media = push(media, [['exit', text, context], events[close - 2], events[close - 1], ['exit', label, context]]); // Reference, resource, or so. media = push(media, events.slice(close + 1)); // Media close. 
media = push(media, [['exit', group, context]]); splice$1(events, open, events.length, media); return events; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeLabelEnd(effects, ok, nok) { const self = this; let index = self.events.length; /** @type {Token} */ let labelStart; /** @type {boolean} */ let defined; // Find an opening. while (index--) { if ((self.events[index][1].type === "labelImage" || self.events[index][1].type === "labelLink") && !self.events[index][1]._balanced) { labelStart = self.events[index][1]; break; } } return start; /** * Start of label end. * * ```markdown * > | [a](b) c * ^ * > | [a][b] c * ^ * > | [a][] b * ^ * > | [a] b * ``` * * @type {State} */ function start(code) { // If there is not an okay opening. if (!labelStart) { return nok(code); } // If the corresponding label (link) start is marked as inactive, // it means we’d be wrapping a link, like this: // // ```markdown // > | a [b [c](d) e](f) g. // ^ // ``` // // We can’t have that, so it’s just balanced brackets. if (labelStart._inactive) { return labelEndNok(code); } defined = self.parser.defined.includes(normalizeIdentifier(self.sliceSerialize({ start: labelStart.end, end: self.now() }))); effects.enter("labelEnd"); effects.enter("labelMarker"); effects.consume(code); effects.exit("labelMarker"); effects.exit("labelEnd"); return after; } /** * After `]`. * * ```markdown * > | [a](b) c * ^ * > | [a][b] c * ^ * > | [a][] b * ^ * > | [a] b * ^ * ``` * * @type {State} */ function after(code) { // Note: `markdown-rs` also parses GFM footnotes here, which for us is in // an extension. // Resource (`[asd](fgh)`)? if (code === 40) { return effects.attempt(resourceConstruct, labelEndOk, defined ? labelEndOk : labelEndNok)(code); } // Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference? if (code === 91) { return effects.attempt(referenceFullConstruct, labelEndOk, defined ? referenceNotFull : labelEndNok)(code); } // Shortcut (`[asd]`) reference? return defined ? labelEndOk(code) : labelEndNok(code); } /** * After `]`, at `[`, but not at a full reference. * * > 👉 **Note**: we only get here if the label is defined. * * ```markdown * > | [a][] b * ^ * > | [a] b * ^ * ``` * * @type {State} */ function referenceNotFull(code) { return effects.attempt(referenceCollapsedConstruct, labelEndOk, labelEndNok)(code); } /** * Done, we found something. * * ```markdown * > | [a](b) c * ^ * > | [a][b] c * ^ * > | [a][] b * ^ * > | [a] b * ^ * ``` * * @type {State} */ function labelEndOk(code) { // Note: `markdown-rs` does a bunch of stuff here. return ok(code); } /** * Done, it’s nothing. * * There was an okay opening, but we didn’t match anything. * * ```markdown * > | [a](b c * ^ * > | [a][b c * ^ * > | [a] b * ^ * ``` * * @type {State} */ function labelEndNok(code) { labelStart._balanced = true; return nok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeResource(effects, ok, nok) { return resourceStart; /** * At a resource. * * ```markdown * > | [a](b) c * ^ * ``` * * @type {State} */ function resourceStart(code) { effects.enter("resource"); effects.enter("resourceMarker"); effects.consume(code); effects.exit("resourceMarker"); return resourceBefore; } /** * In resource, after `(`, at optional whitespace. * * ```markdown * > | [a](b) c * ^ * ``` * * @type {State} */ function resourceBefore(code) { return markdownLineEndingOrSpace(code) ? 
factoryWhitespace(effects, resourceOpen)(code) : resourceOpen(code); } /** * In resource, after optional whitespace, at `)` or a destination. * * ```markdown * > | [a](b) c * ^ * ``` * * @type {State} */ function resourceOpen(code) { if (code === 41) { return resourceEnd(code); } return factoryDestination(effects, resourceDestinationAfter, resourceDestinationMissing, "resourceDestination", "resourceDestinationLiteral", "resourceDestinationLiteralMarker", "resourceDestinationRaw", "resourceDestinationString", 32)(code); } /** * In resource, after destination, at optional whitespace. * * ```markdown * > | [a](b) c * ^ * ``` * * @type {State} */ function resourceDestinationAfter(code) { return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, resourceBetween)(code) : resourceEnd(code); } /** * At invalid destination. * * ```markdown * > | [a](<<) b * ^ * ``` * * @type {State} */ function resourceDestinationMissing(code) { return nok(code); } /** * In resource, after destination and whitespace, at `(` or title. * * ```markdown * > | [a](b ) c * ^ * ``` * * @type {State} */ function resourceBetween(code) { if (code === 34 || code === 39 || code === 40) { return factoryTitle(effects, resourceTitleAfter, nok, "resourceTitle", "resourceTitleMarker", "resourceTitleString")(code); } return resourceEnd(code); } /** * In resource, after title, at optional whitespace. * * ```markdown * > | [a](b "c") d * ^ * ``` * * @type {State} */ function resourceTitleAfter(code) { return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, resourceEnd)(code) : resourceEnd(code); } /** * In resource, at `)`. * * ```markdown * > | [a](b) d * ^ * ``` * * @type {State} */ function resourceEnd(code) { if (code === 41) { effects.enter("resourceMarker"); effects.consume(code); effects.exit("resourceMarker"); effects.exit("resource"); return ok; } return nok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeReferenceFull(effects, ok, nok) { const self = this; return referenceFull; /** * In a reference (full), at the `[`. * * ```markdown * > | [a][b] d * ^ * ``` * * @type {State} */ function referenceFull(code) { return factoryLabel.call(self, effects, referenceFullAfter, referenceFullMissing, "reference", "referenceMarker", "referenceString")(code); } /** * In a reference (full), after `]`. * * ```markdown * > | [a][b] d * ^ * ``` * * @type {State} */ function referenceFullAfter(code) { return self.parser.defined.includes(normalizeIdentifier(self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1))) ? ok(code) : nok(code); } /** * In reference (full) that was missing. * * ```markdown * > | [a][b d * ^ * ``` * * @type {State} */ function referenceFullMissing(code) { return nok(code); } } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeReferenceCollapsed(effects, ok, nok) { return referenceCollapsedStart; /** * In reference (collapsed), at `[`. * * > 👉 **Note**: we only get here if the label is defined. * * ```markdown * > | [a][] d * ^ * ``` * * @type {State} */ function referenceCollapsedStart(code) { // We only attempt a collapsed label if there’s a `[`. effects.enter("reference"); effects.enter("referenceMarker"); effects.consume(code); effects.exit("referenceMarker"); return referenceCollapsedOpen; } /** * In reference (collapsed), at `]`. * * > 👉 **Note**: we only get here if the label is defined. 
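 *
 * A collapsed reference (`[a][]`) only forms when its label is defined;
 * otherwise the brackets stay literal text. A small standalone sketch (using
 * the separate `micromark` package this bundle vendors; expected output per
 * CommonMark):
 *
 * ```js
 * import {micromark} from 'micromark'
 *
 * micromark('[a][]\n\n[a]: b') // => '<p><a href="b">a</a></p>'
 * micromark('[a][]')           // => '<p>[a][]</p>' (no definition, no link)
 * ```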
* * ```markdown * > | [a][] d * ^ * ``` * * @type {State} */ function referenceCollapsedOpen(code) { if (code === 93) { effects.enter("referenceMarker"); effects.consume(code); effects.exit("referenceMarker"); effects.exit("reference"); return ok; } return nok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const labelStartImage = { name: 'labelStartImage', tokenize: tokenizeLabelStartImage, resolveAll: labelEnd.resolveAll }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeLabelStartImage(effects, ok, nok) { const self = this; return start; /** * Start of label (image) start. * * ```markdown * > | a ![b] c * ^ * ``` * * @type {State} */ function start(code) { effects.enter("labelImage"); effects.enter("labelImageMarker"); effects.consume(code); effects.exit("labelImageMarker"); return open; } /** * After `!`, at `[`. * * ```markdown * > | a ![b] c * ^ * ``` * * @type {State} */ function open(code) { if (code === 91) { effects.enter("labelMarker"); effects.consume(code); effects.exit("labelMarker"); effects.exit("labelImage"); return after; } return nok(code); } /** * After `![`. * * ```markdown * > | a ![b] c * ^ * ``` * * This is needed in because, when GFM footnotes are enabled, images never * form when started with a `^`. * Instead, links form: * * ```markdown * ![^a](b) * * ![^a][b] * * [b]: c * ``` * * ```html *
<p>!<a href="b">^a</a></p> * * <p>!<a href="c">^a</a></p>
* ``` * * @type {State} */ function after(code) { // To do: use a new field to do this, this is still needed for // `micromark-extension-gfm-footnote`, but the `label-start-link` // behavior isn’t. // Hidden footnotes hook. /* c8 ignore next 3 */ return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs ? nok(code) : ok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const labelStartLink = { name: 'labelStartLink', tokenize: tokenizeLabelStartLink, resolveAll: labelEnd.resolveAll }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeLabelStartLink(effects, ok, nok) { const self = this; return start; /** * Start of label (link) start. * * ```markdown * > | a [b] c * ^ * ``` * * @type {State} */ function start(code) { effects.enter("labelLink"); effects.enter("labelMarker"); effects.consume(code); effects.exit("labelMarker"); effects.exit("labelLink"); return after; } /** @type {State} */ function after(code) { // To do: this isn’t needed in `micromark-extension-gfm-footnote`, // remove. // Hidden footnotes hook. /* c8 ignore next 3 */ return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs ? nok(code) : ok(code); } } /** * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const lineEnding = { name: 'lineEnding', tokenize: tokenizeLineEnding }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeLineEnding(effects, ok) { return start; /** @type {State} */ function start(code) { effects.enter("lineEnding"); effects.consume(code); effects.exit("lineEnding"); return factorySpace(effects, ok, "linePrefix"); } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const thematicBreak$1 = { name: 'thematicBreak', tokenize: tokenizeThematicBreak }; /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeThematicBreak(effects, ok, nok) { let size = 0; /** @type {NonNullable} */ let marker; return start; /** * Start of thematic break. * * ```markdown * > | *** * ^ * ``` * * @type {State} */ function start(code) { effects.enter("thematicBreak"); // To do: parse indent like `markdown-rs`. return before(code); } /** * After optional whitespace, at marker. * * ```markdown * > | *** * ^ * ``` * * @type {State} */ function before(code) { marker = code; return atBreak(code); } /** * After something, before something else. * * ```markdown * > | *** * ^ * ``` * * @type {State} */ function atBreak(code) { if (code === marker) { effects.enter("thematicBreakSequence"); return sequence(code); } if (size >= 3 && (code === null || markdownLineEnding(code))) { effects.exit("thematicBreak"); return ok(code); } return nok(code); } /** * In sequence. 
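 *
 * At least three markers, with nothing but spaces or tabs between and after
 * them, form a thematic break. A small standalone sketch (using the separate
 * `micromark` package this bundle vendors; expected output per CommonMark):
 *
 * ```js
 * import {micromark} from 'micromark'
 *
 * micromark('***')   // => '<hr />'
 * micromark('* * *') // => '<hr />' (interior whitespace is allowed)
 * micromark('**')    // => '<p>**</p>' (too short to be a break)
 * ```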
* * ```markdown * > | *** * ^ * ``` * * @type {State} */ function sequence(code) { if (code === marker) { effects.consume(code); size++; return sequence; } effects.exit("thematicBreakSequence"); return markdownSpace(code) ? factorySpace(effects, atBreak, "whitespace")(code) : atBreak(code); } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').ContainerState} ContainerState * @typedef {import('micromark-util-types').Exiter} Exiter * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const list$3 = { name: 'list', tokenize: tokenizeListStart, continuation: { tokenize: tokenizeListContinuation }, exit: tokenizeListEnd }; /** @type {Construct} */ const listItemPrefixWhitespaceConstruct = { tokenize: tokenizeListItemPrefixWhitespace, partial: true }; /** @type {Construct} */ const indentConstruct = { tokenize: tokenizeIndent, partial: true }; // To do: `markdown-rs` parses list items on their own and later stitches them // together. /** * @type {Tokenizer} * @this {TokenizeContext} */ function tokenizeListStart(effects, ok, nok) { const self = this; const tail = self.events[self.events.length - 1]; let initialSize = tail && tail[1].type === "linePrefix" ? tail[2].sliceSerialize(tail[1], true).length : 0; let size = 0; return start; /** @type {State} */ function start(code) { const kind = self.containerState.type || (code === 42 || code === 43 || code === 45 ? "listUnordered" : "listOrdered"); if (kind === "listUnordered" ? !self.containerState.marker || code === self.containerState.marker : asciiDigit(code)) { if (!self.containerState.type) { self.containerState.type = kind; effects.enter(kind, { _container: true }); } if (kind === "listUnordered") { effects.enter("listItemPrefix"); return code === 42 || code === 45 ? effects.check(thematicBreak$1, nok, atMarker)(code) : atMarker(code); } if (!self.interrupt || code === 49) { effects.enter("listItemPrefix"); effects.enter("listItemValue"); return inside(code); } } return nok(code); } /** @type {State} */ function inside(code) { if (asciiDigit(code) && ++size < 10) { effects.consume(code); return inside; } if ((!self.interrupt || size < 2) && (self.containerState.marker ? code === self.containerState.marker : code === 41 || code === 46)) { effects.exit("listItemValue"); return atMarker(code); } return nok(code); } /** * @type {State} **/ function atMarker(code) { effects.enter("listItemMarker"); effects.consume(code); effects.exit("listItemMarker"); self.containerState.marker = self.containerState.marker || code; return effects.check(blankLine, // Can’t be empty when interrupting. self.interrupt ? 
nok : onBlank, effects.attempt(listItemPrefixWhitespaceConstruct, endOfPrefix, otherPrefix)); } /** @type {State} */ function onBlank(code) { self.containerState.initialBlankLine = true; initialSize++; return endOfPrefix(code); } /** @type {State} */ function otherPrefix(code) { if (markdownSpace(code)) { effects.enter("listItemPrefixWhitespace"); effects.consume(code); effects.exit("listItemPrefixWhitespace"); return endOfPrefix; } return nok(code); } /** @type {State} */ function endOfPrefix(code) { self.containerState.size = initialSize + self.sliceSerialize(effects.exit("listItemPrefix"), true).length; return ok(code); } } /** * @type {Tokenizer} * @this {TokenizeContext} */ function tokenizeListContinuation(effects, ok, nok) { const self = this; self.containerState._closeFlow = undefined; return effects.check(blankLine, onBlank, notBlank); /** @type {State} */ function onBlank(code) { self.containerState.furtherBlankLines = self.containerState.furtherBlankLines || self.containerState.initialBlankLine; // We have a blank line. // Still, try to consume at most the items size. return factorySpace(effects, ok, "listItemIndent", self.containerState.size + 1)(code); } /** @type {State} */ function notBlank(code) { if (self.containerState.furtherBlankLines || !markdownSpace(code)) { self.containerState.furtherBlankLines = undefined; self.containerState.initialBlankLine = undefined; return notInCurrentItem(code); } self.containerState.furtherBlankLines = undefined; self.containerState.initialBlankLine = undefined; return effects.attempt(indentConstruct, ok, notInCurrentItem)(code); } /** @type {State} */ function notInCurrentItem(code) { // While we do continue, we signal that the flow should be closed. self.containerState._closeFlow = true; // As we’re closing flow, we’re no longer interrupting. self.interrupt = undefined; // Always populated by defaults. return factorySpace(effects, effects.attempt(list$3, ok, nok), "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code); } } /** * @type {Tokenizer} * @this {TokenizeContext} */ function tokenizeIndent(effects, ok, nok) { const self = this; return factorySpace(effects, afterPrefix, "listItemIndent", self.containerState.size + 1); /** @type {State} */ function afterPrefix(code) { const tail = self.events[self.events.length - 1]; return tail && tail[1].type === "listItemIndent" && tail[2].sliceSerialize(tail[1], true).length === self.containerState.size ? ok(code) : nok(code); } } /** * @type {Exiter} * @this {TokenizeContext} */ function tokenizeListEnd(effects) { effects.exit(this.containerState.type); } /** * @type {Tokenizer} * @this {TokenizeContext} */ function tokenizeListItemPrefixWhitespace(effects, ok, nok) { const self = this; // Always populated by defaults. return factorySpace(effects, afterPrefix, "listItemPrefixWhitespace", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4 + 1); /** @type {State} */ function afterPrefix(code) { const tail = self.events[self.events.length - 1]; return !markdownSpace(code) && tail && tail[1].type === "listItemPrefixWhitespace" ? 
ok(code) : nok(code); } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Tokenizer} Tokenizer */ /** @type {Construct} */ const setextUnderline = { name: 'setextUnderline', tokenize: tokenizeSetextUnderline, resolveTo: resolveToSetextUnderline }; /** @type {Resolver} */ function resolveToSetextUnderline(events, context) { // To do: resolve like `markdown-rs`. let index = events.length; /** @type {number | undefined} */ let content; /** @type {number | undefined} */ let text; /** @type {number | undefined} */ let definition; // Find the opening of the content. // It’ll always exist: we don’t tokenize if it isn’t there. while (index--) { if (events[index][0] === 'enter') { if (events[index][1].type === "content") { content = index; break; } if (events[index][1].type === "paragraph") { text = index; } } // Exit else { if (events[index][1].type === "content") { // Remove the content end (if needed we’ll add it later) events.splice(index, 1); } if (!definition && events[index][1].type === "definition") { definition = index; } } } const heading = { type: "setextHeading", start: Object.assign({}, events[text][1].start), end: Object.assign({}, events[events.length - 1][1].end) }; // Change the paragraph to setext heading text. events[text][1].type = "setextHeadingText"; // If we have definitions in the content, we’ll keep on having content, // but we need move it. if (definition) { events.splice(text, 0, ['enter', heading, context]); events.splice(definition + 1, 0, ['exit', events[content][1], context]); events[content][1].end = Object.assign({}, events[definition][1].end); } else { events[content][1] = heading; } // Add the heading exit at the end. events.push(['exit', heading, context]); return events; } /** * @this {TokenizeContext} * @type {Tokenizer} */ function tokenizeSetextUnderline(effects, ok, nok) { const self = this; /** @type {NonNullable} */ let marker; return start; /** * At start of heading (setext) underline. * * ```markdown * | aa * > | == * ^ * ``` * * @type {State} */ function start(code) { let index = self.events.length; /** @type {boolean | undefined} */ let paragraph; // Find an opening. while (index--) { // Skip enter/exit of line ending, line prefix, and content. // We can now either have a definition or a paragraph. if (self.events[index][1].type !== "lineEnding" && self.events[index][1].type !== "linePrefix" && self.events[index][1].type !== "content") { paragraph = self.events[index][1].type === "paragraph"; break; } } // To do: handle lazy/pierce like `markdown-rs`. // To do: parse indent like `markdown-rs`. if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) { effects.enter("setextHeadingLine"); marker = code; return before(code); } return nok(code); } /** * After optional whitespace, at `-` or `=`. * * ```markdown * | aa * > | == * ^ * ``` * * @type {State} */ function before(code) { effects.enter("setextHeadingLineSequence"); return inside(code); } /** * In sequence. * * ```markdown * | aa * > | == * ^ * ``` * * @type {State} */ function inside(code) { if (code === marker) { effects.consume(code); return inside; } effects.exit("setextHeadingLineSequence"); return markdownSpace(code) ? 
factorySpace(effects, after, "lineSuffix")(code) : after(code); } /** * After sequence, after optional whitespace. * * ```markdown * | aa * > | == * ^ * ``` * * @type {State} */ function after(code) { if (code === null || markdownLineEnding(code)) { effects.exit("setextHeadingLine"); return ok(code); } return nok(code); } } /** * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct * @typedef {import('micromark-util-types').Initializer} Initializer * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext */ /** @type {InitialConstruct} */ const flow$1 = { tokenize: initializeFlow }; /** * @this {TokenizeContext} * @type {Initializer} */ function initializeFlow(effects) { const self = this; const initial = effects.attempt( // Try to parse a blank line. blankLine, atBlankEnding, // Try to parse initial flow (essentially, only code). effects.attempt( this.parser.constructs.flowInitial, afterConstruct, factorySpace( effects, effects.attempt( this.parser.constructs.flow, afterConstruct, effects.attempt(content, afterConstruct) ), 'linePrefix' ) ) ); return initial /** @type {State} */ function atBlankEnding(code) { if (code === null) { effects.consume(code); return } effects.enter('lineEndingBlank'); effects.consume(code); effects.exit('lineEndingBlank'); self.currentConstruct = undefined; return initial } /** @type {State} */ function afterConstruct(code) { if (code === null) { effects.consume(code); return } effects.enter('lineEnding'); effects.consume(code); effects.exit('lineEnding'); self.currentConstruct = undefined; return initial } } /** * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct * @typedef {import('micromark-util-types').Initializer} Initializer * @typedef {import('micromark-util-types').Resolver} Resolver * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext */ const resolver = { resolveAll: createResolver() }; const string$1 = initializeFactory('string'); const text$2 = initializeFactory('text'); /** * @param {'string' | 'text'} field * @returns {InitialConstruct} */ function initializeFactory(field) { return { tokenize: initializeText, resolveAll: createResolver( field === 'text' ? resolveAllLineSuffixes : undefined ) } /** * @this {TokenizeContext} * @type {Initializer} */ function initializeText(effects) { const self = this; const constructs = this.parser.constructs[field]; const text = effects.attempt(constructs, start, notText); return start /** @type {State} */ function start(code) { return atBreak(code) ? text(code) : notText(code) } /** @type {State} */ function notText(code) { if (code === null) { effects.consume(code); return } effects.enter('data'); effects.consume(code); return data } /** @type {State} */ function data(code) { if (atBreak(code)) { effects.exit('data'); return text(code) } // Data. effects.consume(code); return data } /** * @param {Code} code * @returns {boolean} */ function atBreak(code) { if (code === null) { return true } const list = constructs[code]; let index = -1; if (list) { // Always populated by defaults. 
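/*
 * Illustrative note (not part of the bundle): `constructs` here is the
 * ConstructRecord for this content type, mapping character codes to one or
 * more constructs; for instance `constructs[38]` holds the character
 * reference construct in the default `string`/`text` maps further down.
 * Candidates may define a `previous` hook that vetoes a start position, so
 * the loop below reports a break as soon as one candidate either has no such
 * hook or its hook accepts `self.previous`.
 */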
while (++index < list.length) { const item = list[index]; if (!item.previous || item.previous.call(self, self.previous)) { return true } } } return false } } } /** * @param {Resolver | undefined} [extraResolver] * @returns {Resolver} */ function createResolver(extraResolver) { return resolveAllText /** @type {Resolver} */ function resolveAllText(events, context) { let index = -1; /** @type {number | undefined} */ let enter; // A rather boring computation (to merge adjacent `data` events) which // improves mm performance by 29%. while (++index <= events.length) { if (enter === undefined) { if (events[index] && events[index][1].type === 'data') { enter = index; index++; } } else if (!events[index] || events[index][1].type !== 'data') { // Don’t do anything if there is one data token. if (index !== enter + 2) { events[enter][1].end = events[index - 1][1].end; events.splice(enter + 2, index - enter - 2); index = enter + 2; } enter = undefined; } } return extraResolver ? extraResolver(events, context) : events } } /** * A rather ugly set of instructions which again looks at chunks in the input * stream. * The reason to do this here is that it is *much* faster to parse in reverse. * And that we can’t hook into `null` to split the line suffix before an EOF. * To do: figure out if we can make this into a clean utility, or even in core. * As it will be useful for GFMs literal autolink extension (and maybe even * tables?) * * @type {Resolver} */ function resolveAllLineSuffixes(events, context) { let eventIndex = 0; // Skip first. while (++eventIndex <= events.length) { if ( (eventIndex === events.length || events[eventIndex][1].type === 'lineEnding') && events[eventIndex - 1][1].type === 'data' ) { const data = events[eventIndex - 1][1]; const chunks = context.sliceStream(data); let index = chunks.length; let bufferIndex = -1; let size = 0; /** @type {boolean | undefined} */ let tabs; while (index--) { const chunk = chunks[index]; if (typeof chunk === 'string') { bufferIndex = chunk.length; while (chunk.charCodeAt(bufferIndex - 1) === 32) { size++; bufferIndex--; } if (bufferIndex) break bufferIndex = -1; } // Number else if (chunk === -2) { tabs = true; size++; } else if (chunk === -1) ; else { // Replacement character, exit. index++; break } } if (size) { const token = { type: eventIndex === events.length || tabs || size < 2 ? 'lineSuffix' : 'hardBreakTrailing', start: { line: data.end.line, column: data.end.column - size, offset: data.end.offset - size, _index: data.start._index + index, _bufferIndex: index ? 
bufferIndex : data.start._bufferIndex + bufferIndex }, end: Object.assign({}, data.end) }; data.end = Object.assign({}, token.start); if (data.start.offset === data.end.offset) { Object.assign(data, token); } else { events.splice( eventIndex, 0, ['enter', token, context], ['exit', token, context] ); eventIndex += 2; } } eventIndex++; } } return events } /** * @typedef {import('micromark-util-types').Chunk} Chunk * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Construct} Construct * @typedef {import('micromark-util-types').ConstructRecord} ConstructRecord * @typedef {import('micromark-util-types').Effects} Effects * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct * @typedef {import('micromark-util-types').ParseContext} ParseContext * @typedef {import('micromark-util-types').Point} Point * @typedef {import('micromark-util-types').State} State * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenType} TokenType * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext */ /** * Create a tokenizer. * Tokenizers deal with one type of data (e.g., containers, flow, text). * The parser is the object dealing with it all. * `initialize` works like other constructs, except that only its `tokenize` * function is used, in which case it doesn’t receive an `ok` or `nok`. * `from` can be given to set the point before the first character, although * when further lines are indented, they must be set with `defineSkip`. * * @param {ParseContext} parser * @param {InitialConstruct} initialize * @param {Omit | undefined} [from] * @returns {TokenizeContext} */ function createTokenizer(parser, initialize, from) { /** @type {Point} */ let point = Object.assign( from ? Object.assign({}, from) : { line: 1, column: 1, offset: 0 }, { _index: 0, _bufferIndex: -1 } ); /** @type {Record} */ const columnStart = {}; /** @type {Array} */ const resolveAllConstructs = []; /** @type {Array} */ let chunks = []; /** @type {Array} */ let stack = []; /** * Tools used for tokenizing. * * @type {Effects} */ const effects = { consume, enter, exit, attempt: constructFactory(onsuccessfulconstruct), check: constructFactory(onsuccessfulcheck), interrupt: constructFactory(onsuccessfulcheck, { interrupt: true }) }; /** * State and tools for resolving and serializing. * * @type {TokenizeContext} */ const context = { previous: null, code: null, containerState: {}, events: [], parser, sliceStream, sliceSerialize, now, defineSkip, write }; /** * The state function. * * @type {State | undefined} */ let state = initialize.tokenize.call(context, effects); if (initialize.resolveAll) { resolveAllConstructs.push(initialize); } return context /** @type {TokenizeContext['write']} */ function write(slice) { chunks = push(chunks, slice); main(); // Exit if we’re not done, resolve might change stuff. if (chunks[chunks.length - 1] !== null) { return [] } addResult(initialize, 0); // Otherwise, resolve, and exit. context.events = resolveAll(resolveAllConstructs, context.events, context); return context.events } // // Tools. 
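/*
 * Hedged usage sketch (illustrative only, mirroring what `fromMarkdown`
 * further down does; the names are for the example):
 *
 *   // const parser = parse()               // see `parse(options)` below
 *   // const tokenizer = parser.document()  // a TokenizeContext from createTokenizer
 *   // const events = tokenizer.write(preprocess()('# hi', undefined, true))
 *
 * `write` keeps returning an empty array until the `null` end-of-stream chunk
 * has been written; only then are resolvers run and the event list returned.
 */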
// /** @type {TokenizeContext['sliceSerialize']} */ function sliceSerialize(token, expandTabs) { return serializeChunks(sliceStream(token), expandTabs) } /** @type {TokenizeContext['sliceStream']} */ function sliceStream(token) { return sliceChunks(chunks, token) } /** @type {TokenizeContext['now']} */ function now() { // This is a hot path, so we clone manually instead of `Object.assign({}, point)` const {line, column, offset, _index, _bufferIndex} = point; return { line, column, offset, _index, _bufferIndex } } /** @type {TokenizeContext['defineSkip']} */ function defineSkip(value) { columnStart[value.line] = value.column; accountForPotentialSkip(); } // // State management. // /** * Main loop (note that `_index` and `_bufferIndex` in `point` are modified by * `consume`). * Here is where we walk through the chunks, which either include strings of * several characters, or numerical character codes. * The reason to do this in a loop instead of a call is so the stack can * drain. * * @returns {undefined} */ function main() { /** @type {number} */ let chunkIndex; while (point._index < chunks.length) { const chunk = chunks[point._index]; // If we’re in a buffer chunk, loop through it. if (typeof chunk === 'string') { chunkIndex = point._index; if (point._bufferIndex < 0) { point._bufferIndex = 0; } while ( point._index === chunkIndex && point._bufferIndex < chunk.length ) { go(chunk.charCodeAt(point._bufferIndex)); } } else { go(chunk); } } } /** * Deal with one code. * * @param {Code} code * @returns {undefined} */ function go(code) { state = state(code); } /** @type {Effects['consume']} */ function consume(code) { if (markdownLineEnding(code)) { point.line++; point.column = 1; point.offset += code === -3 ? 2 : 1; accountForPotentialSkip(); } else if (code !== -1) { point.column++; point.offset++; } // Not in a string chunk. if (point._bufferIndex < 0) { point._index++; } else { point._bufferIndex++; // At end of string chunk. // @ts-expect-error Points w/ non-negative `_bufferIndex` reference // strings. if (point._bufferIndex === chunks[point._index].length) { point._bufferIndex = -1; point._index++; } } // Expose the previous character. context.previous = code; } /** @type {Effects['enter']} */ function enter(type, fields) { /** @type {Token} */ // @ts-expect-error Patch instead of assign required fields to help GC. const token = fields || {}; token.type = type; token.start = now(); context.events.push(['enter', token, context]); stack.push(token); return token } /** @type {Effects['exit']} */ function exit(type) { const token = stack.pop(); token.end = now(); context.events.push(['exit', token, context]); return token } /** * Use results. * * @type {ReturnHandle} */ function onsuccessfulconstruct(construct, info) { addResult(construct, info.from); } /** * Discard results. * * @type {ReturnHandle} */ function onsuccessfulcheck(_, info) { info.restore(); } /** * Factory to attempt/check/interrupt. * * @param {ReturnHandle} onreturn * @param {{interrupt?: boolean | undefined} | undefined} [fields] */ function constructFactory(onreturn, fields) { return hook /** * Handle either an object mapping codes to constructs, a list of * constructs, or a single construct. 
* * @param {Array | Construct | ConstructRecord} constructs * @param {State} returnState * @param {State | undefined} [bogusState] * @returns {State} */ function hook(constructs, returnState, bogusState) { /** @type {Array} */ let listOfConstructs; /** @type {number} */ let constructIndex; /** @type {Construct} */ let currentConstruct; /** @type {Info} */ let info; return Array.isArray(constructs) /* c8 ignore next 1 */ ? handleListOfConstructs(constructs) : 'tokenize' in constructs ? // @ts-expect-error Looks like a construct. handleListOfConstructs([constructs]) : handleMapOfConstructs(constructs) /** * Handle a list of construct. * * @param {ConstructRecord} map * @returns {State} */ function handleMapOfConstructs(map) { return start /** @type {State} */ function start(code) { const def = code !== null && map[code]; const all = code !== null && map.null; const list = [ // To do: add more extension tests. /* c8 ignore next 2 */ ...(Array.isArray(def) ? def : def ? [def] : []), ...(Array.isArray(all) ? all : all ? [all] : []) ]; return handleListOfConstructs(list)(code) } } /** * Handle a list of construct. * * @param {Array} list * @returns {State} */ function handleListOfConstructs(list) { listOfConstructs = list; constructIndex = 0; if (list.length === 0) { return bogusState } return handleConstruct(list[constructIndex]) } /** * Handle a single construct. * * @param {Construct} construct * @returns {State} */ function handleConstruct(construct) { return start /** @type {State} */ function start(code) { // To do: not needed to store if there is no bogus state, probably? // Currently doesn’t work because `inspect` in document does a check // w/o a bogus, which doesn’t make sense. But it does seem to help perf // by not storing. info = store(); currentConstruct = construct; if (!construct.partial) { context.currentConstruct = construct; } // Always populated by defaults. if ( construct.name && context.parser.constructs.disable.null.includes(construct.name) ) { return nok() } return construct.tokenize.call( // If we do have fields, create an object w/ `context` as its // prototype. // This allows a “live binding”, which is needed for `interrupt`. fields ? Object.assign(Object.create(context), fields) : context, effects, ok, nok )(code) } } /** @type {State} */ function ok(code) { onreturn(currentConstruct, info); return returnState } /** @type {State} */ function nok(code) { info.restore(); if (++constructIndex < listOfConstructs.length) { return handleConstruct(listOfConstructs[constructIndex]) } return bogusState } } } /** * @param {Construct} construct * @param {number} from * @returns {undefined} */ function addResult(construct, from) { if (construct.resolveAll && !resolveAllConstructs.includes(construct)) { resolveAllConstructs.push(construct); } if (construct.resolve) { splice$1( context.events, from, context.events.length - from, construct.resolve(context.events.slice(from), context) ); } if (construct.resolveTo) { context.events = construct.resolveTo(context.events, context); } } /** * Store state. * * @returns {Info} */ function store() { const startPoint = now(); const startPrevious = context.previous; const startCurrentConstruct = context.currentConstruct; const startEventsIndex = context.events.length; const startStack = Array.from(stack); return { restore, from: startEventsIndex } /** * Restore state. 
* * @returns {undefined} */ function restore() { point = startPoint; context.previous = startPrevious; context.currentConstruct = startCurrentConstruct; context.events.length = startEventsIndex; stack = startStack; accountForPotentialSkip(); } } /** * Move the current point a bit forward in the line when it’s on a column * skip. * * @returns {undefined} */ function accountForPotentialSkip() { if (point.line in columnStart && point.column < 2) { point.column = columnStart[point.line]; point.offset += columnStart[point.line] - 1; } } } /** * Get the chunks from a slice of chunks in the range of a token. * * @param {Array} chunks * @param {Pick} token * @returns {Array} */ function sliceChunks(chunks, token) { const startIndex = token.start._index; const startBufferIndex = token.start._bufferIndex; const endIndex = token.end._index; const endBufferIndex = token.end._bufferIndex; /** @type {Array} */ let view; if (startIndex === endIndex) { // @ts-expect-error `_bufferIndex` is used on string chunks. view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]; } else { view = chunks.slice(startIndex, endIndex); if (startBufferIndex > -1) { const head = view[0]; if (typeof head === 'string') { view[0] = head.slice(startBufferIndex); } else { view.shift(); } } if (endBufferIndex > 0) { // @ts-expect-error `_bufferIndex` is used on string chunks. view.push(chunks[endIndex].slice(0, endBufferIndex)); } } return view } /** * Get the string value of a slice of chunks. * * @param {Array} chunks * @param {boolean | undefined} [expandTabs=false] * @returns {string} */ function serializeChunks(chunks, expandTabs) { let index = -1; /** @type {Array} */ const result = []; /** @type {boolean | undefined} */ let atTab; while (++index < chunks.length) { const chunk = chunks[index]; /** @type {string} */ let value; if (typeof chunk === 'string') { value = chunk; } else switch (chunk) { case -5: { value = '\r'; break } case -4: { value = '\n'; break } case -3: { value = '\r' + '\n'; break } case -2: { value = expandTabs ? ' ' : '\t'; break } case -1: { if (!expandTabs && atTab) continue value = ' '; break } default: { // Currently only replacement character. 
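/*
 * For reference, the negative chunk codes handled above (and produced by
 * `preprocess` below) are: -5 carriage return, -4 line feed, -3 CR+LF,
 * -2 horizontal tab, -1 virtual space from tab expansion, and `null` marks
 * the end of the stream; any other number is a plain character code, such as
 * 65533 (the replacement character) emitted for NUL bytes.
 */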
value = String.fromCharCode(chunk); } } atTab = chunk === -2; result.push(value); } return result.join('') } /** * @typedef {import('micromark-util-types').Extension} Extension */ /** @satisfies {Extension['document']} */ const document$1 = { [42]: list$3, [43]: list$3, [45]: list$3, [48]: list$3, [49]: list$3, [50]: list$3, [51]: list$3, [52]: list$3, [53]: list$3, [54]: list$3, [55]: list$3, [56]: list$3, [57]: list$3, [62]: blockQuote }; /** @satisfies {Extension['contentInitial']} */ const contentInitial = { [91]: definition$1 }; /** @satisfies {Extension['flowInitial']} */ const flowInitial = { [-2]: codeIndented, [-1]: codeIndented, [32]: codeIndented }; /** @satisfies {Extension['flow']} */ const flow = { [35]: headingAtx, [42]: thematicBreak$1, [45]: [setextUnderline, thematicBreak$1], [60]: htmlFlow, [61]: setextUnderline, [95]: thematicBreak$1, [96]: codeFenced, [126]: codeFenced }; /** @satisfies {Extension['string']} */ const string = { [38]: characterReference, [92]: characterEscape }; /** @satisfies {Extension['text']} */ const text$1 = { [-5]: lineEnding, [-4]: lineEnding, [-3]: lineEnding, [33]: labelStartImage, [38]: characterReference, [42]: attention, [60]: [autolink, htmlText], [91]: labelStartLink, [92]: [hardBreakEscape, characterEscape], [93]: labelEnd, [95]: attention, [96]: codeText }; /** @satisfies {Extension['insideSpan']} */ const insideSpan = { null: [attention, resolver] }; /** @satisfies {Extension['attentionMarkers']} */ const attentionMarkers = { null: [42, 95] }; /** @satisfies {Extension['disable']} */ const disable = { null: [] }; var defaultConstructs = /*#__PURE__*/Object.freeze({ __proto__: null, attentionMarkers: attentionMarkers, contentInitial: contentInitial, disable: disable, document: document$1, flow: flow, flowInitial: flowInitial, insideSpan: insideSpan, string: string, text: text$1 }); /** * @typedef {import('micromark-util-types').Create} Create * @typedef {import('micromark-util-types').FullNormalizedExtension} FullNormalizedExtension * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct * @typedef {import('micromark-util-types').ParseContext} ParseContext * @typedef {import('micromark-util-types').ParseOptions} ParseOptions */ /** * @param {ParseOptions | null | undefined} [options] * @returns {ParseContext} */ function parse(options) { const settings = options || {}; const constructs = /** @type {FullNormalizedExtension} */ combineExtensions([defaultConstructs, ...(settings.extensions || [])]); /** @type {ParseContext} */ const parser = { defined: [], lazy: {}, constructs, content: create(content$1), document: create(document$2), flow: create(flow$1), string: create(string$1), text: create(text$2) }; return parser /** * @param {InitialConstruct} initial */ function create(initial) { return creator /** @type {Create} */ function creator(from) { return createTokenizer(parser, initial, from) } } } /** * @typedef {import('micromark-util-types').Event} Event */ /** * @param {Array} events * @returns {Array} */ function postprocess(events) { while (!subtokenize(events)) { // Empty } return events } /** * @typedef {import('micromark-util-types').Chunk} Chunk * @typedef {import('micromark-util-types').Code} Code * @typedef {import('micromark-util-types').Encoding} Encoding * @typedef {import('micromark-util-types').Value} Value */ /** * @callback Preprocessor * @param {Value} value * @param {Encoding | null | undefined} [encoding] * @param {boolean | null | undefined} [end=false] * @returns {Array} */ const search = 
/[\0\t\n\r]/g; /** * @returns {Preprocessor} */ function preprocess() { let column = 1; let buffer = ''; /** @type {boolean | undefined} */ let start = true; /** @type {boolean | undefined} */ let atCarriageReturn; return preprocessor /** @type {Preprocessor} */ // eslint-disable-next-line complexity function preprocessor(value, encoding, end) { /** @type {Array} */ const chunks = []; /** @type {RegExpMatchArray | null} */ let match; /** @type {number} */ let next; /** @type {number} */ let startPosition; /** @type {number} */ let endPosition; /** @type {Code} */ let code; value = buffer + (typeof value === 'string' ? value.toString() : new TextDecoder(encoding || undefined).decode(value)); startPosition = 0; buffer = ''; if (start) { // To do: `markdown-rs` actually parses BOMs (byte order mark). if (value.charCodeAt(0) === 65279) { startPosition++; } start = undefined; } while (startPosition < value.length) { search.lastIndex = startPosition; match = search.exec(value); endPosition = match && match.index !== undefined ? match.index : value.length; code = value.charCodeAt(endPosition); if (!match) { buffer = value.slice(startPosition); break } if (code === 10 && startPosition === endPosition && atCarriageReturn) { chunks.push(-3); atCarriageReturn = undefined; } else { if (atCarriageReturn) { chunks.push(-5); atCarriageReturn = undefined; } if (startPosition < endPosition) { chunks.push(value.slice(startPosition, endPosition)); column += endPosition - startPosition; } switch (code) { case 0: { chunks.push(65533); column++; break } case 9: { next = Math.ceil(column / 4) * 4; chunks.push(-2); while (column++ < next) chunks.push(-1); break } case 10: { chunks.push(-4); column = 1; break } default: { atCarriageReturn = true; column = 1; } } } startPosition = endPosition + 1; } if (end) { if (atCarriageReturn) chunks.push(-5); if (buffer) chunks.push(buffer); chunks.push(null); } return chunks } } const characterEscapeOrReference = /\\([!-/:-@[-`{-~])|&(#(?:\d{1,7}|x[\da-f]{1,6})|[\da-z]{1,31});/gi; /** * Decode markdown strings (which occur in places such as fenced code info * strings, destinations, labels, and titles). * * The “string” content type allows character escapes and -references. * This decodes those. * * @param {string} value * Value to decode. * @returns {string} * Decoded value. */ function decodeString(value) { return value.replace(characterEscapeOrReference, decode) } /** * @param {string} $0 * @param {string} $1 * @param {string} $2 * @returns {string} */ function decode($0, $1, $2) { if ($1) { // Escape. return $1 } // Reference. const head = $2.charCodeAt(0); if (head === 35) { const head = $2.charCodeAt(1); const hex = head === 120 || head === 88; return decodeNumericCharacterReference($2.slice(hex ? 2 : 1), hex ? 
16 : 10) } return decodeNamedCharacterReference($2) || $0 } /** * @typedef {import('mdast').Break} Break * @typedef {import('mdast').Blockquote} Blockquote * @typedef {import('mdast').Code} Code * @typedef {import('mdast').Definition} Definition * @typedef {import('mdast').Emphasis} Emphasis * @typedef {import('mdast').Heading} Heading * @typedef {import('mdast').Html} Html * @typedef {import('mdast').Image} Image * @typedef {import('mdast').InlineCode} InlineCode * @typedef {import('mdast').Link} Link * @typedef {import('mdast').List} List * @typedef {import('mdast').ListItem} ListItem * @typedef {import('mdast').Nodes} Nodes * @typedef {import('mdast').Paragraph} Paragraph * @typedef {import('mdast').Parent} Parent * @typedef {import('mdast').PhrasingContent} PhrasingContent * @typedef {import('mdast').ReferenceType} ReferenceType * @typedef {import('mdast').Root} Root * @typedef {import('mdast').Strong} Strong * @typedef {import('mdast').Text} Text * @typedef {import('mdast').ThematicBreak} ThematicBreak * * @typedef {import('micromark-util-types').Encoding} Encoding * @typedef {import('micromark-util-types').Event} Event * @typedef {import('micromark-util-types').ParseOptions} ParseOptions * @typedef {import('micromark-util-types').Token} Token * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext * @typedef {import('micromark-util-types').Value} Value * * @typedef {import('unist').Point} Point * * @typedef {import('../index.js').CompileData} CompileData */ const own$3 = {}.hasOwnProperty; /** * Turn markdown into a syntax tree. * * @overload * @param {Value} value * @param {Encoding | null | undefined} [encoding] * @param {Options | null | undefined} [options] * @returns {Root} * * @overload * @param {Value} value * @param {Options | null | undefined} [options] * @returns {Root} * * @param {Value} value * Markdown to parse. * @param {Encoding | Options | null | undefined} [encoding] * Character encoding for when `value` is `Buffer`. * @param {Options | null | undefined} [options] * Configuration. * @returns {Root} * mdast tree. */ function fromMarkdown(value, encoding, options) { if (typeof encoding !== 'string') { options = encoding; encoding = undefined; } return compiler(options)(postprocess(parse(options).document().write(preprocess()(value, encoding, true)))); } /** * Note this compiler only understand complete buffering, not streaming. 
* * @param {Options | null | undefined} [options] */ function compiler(options) { /** @type {Config} */ const config = { transforms: [], canContainEols: ['emphasis', 'fragment', 'heading', 'paragraph', 'strong'], enter: { autolink: opener(link), autolinkProtocol: onenterdata, autolinkEmail: onenterdata, atxHeading: opener(heading), blockQuote: opener(blockQuote), characterEscape: onenterdata, characterReference: onenterdata, codeFenced: opener(codeFlow), codeFencedFenceInfo: buffer, codeFencedFenceMeta: buffer, codeIndented: opener(codeFlow, buffer), codeText: opener(codeText, buffer), codeTextData: onenterdata, data: onenterdata, codeFlowValue: onenterdata, definition: opener(definition), definitionDestinationString: buffer, definitionLabelString: buffer, definitionTitleString: buffer, emphasis: opener(emphasis), hardBreakEscape: opener(hardBreak), hardBreakTrailing: opener(hardBreak), htmlFlow: opener(html, buffer), htmlFlowData: onenterdata, htmlText: opener(html, buffer), htmlTextData: onenterdata, image: opener(image), label: buffer, link: opener(link), listItem: opener(listItem), listItemValue: onenterlistitemvalue, listOrdered: opener(list, onenterlistordered), listUnordered: opener(list), paragraph: opener(paragraph), reference: onenterreference, referenceString: buffer, resourceDestinationString: buffer, resourceTitleString: buffer, setextHeading: opener(heading), strong: opener(strong), thematicBreak: opener(thematicBreak) }, exit: { atxHeading: closer(), atxHeadingSequence: onexitatxheadingsequence, autolink: closer(), autolinkEmail: onexitautolinkemail, autolinkProtocol: onexitautolinkprotocol, blockQuote: closer(), characterEscapeValue: onexitdata, characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker, characterReferenceMarkerNumeric: onexitcharacterreferencemarker, characterReferenceValue: onexitcharacterreferencevalue, characterReference: onexitcharacterreference, codeFenced: closer(onexitcodefenced), codeFencedFence: onexitcodefencedfence, codeFencedFenceInfo: onexitcodefencedfenceinfo, codeFencedFenceMeta: onexitcodefencedfencemeta, codeFlowValue: onexitdata, codeIndented: closer(onexitcodeindented), codeText: closer(onexitcodetext), codeTextData: onexitdata, data: onexitdata, definition: closer(), definitionDestinationString: onexitdefinitiondestinationstring, definitionLabelString: onexitdefinitionlabelstring, definitionTitleString: onexitdefinitiontitlestring, emphasis: closer(), hardBreakEscape: closer(onexithardbreak), hardBreakTrailing: closer(onexithardbreak), htmlFlow: closer(onexithtmlflow), htmlFlowData: onexitdata, htmlText: closer(onexithtmltext), htmlTextData: onexitdata, image: closer(onexitimage), label: onexitlabel, labelText: onexitlabeltext, lineEnding: onexitlineending, link: closer(onexitlink), listItem: closer(), listOrdered: closer(), listUnordered: closer(), paragraph: closer(), referenceString: onexitreferencestring, resourceDestinationString: onexitresourcedestinationstring, resourceTitleString: onexitresourcetitlestring, resource: onexitresource, setextHeading: closer(onexitsetextheading), setextHeadingLineSequence: onexitsetextheadinglinesequence, setextHeadingText: onexitsetextheadingtext, strong: closer(), thematicBreak: closer() } }; configure$1(config, (options || {}).mdastExtensions || []); /** @type {CompileData} */ const data = {}; return compile; /** * Turn micromark events into an mdast tree. * * @param {Array} events * Events. * @returns {Root} * mdast tree. 
*/ function compile(events) { /** @type {Root} */ let tree = { type: 'root', children: [] }; /** @type {Omit} */ const context = { stack: [tree], tokenStack: [], config, enter, exit, buffer, resume, data }; /** @type {Array} */ const listStack = []; let index = -1; while (++index < events.length) { // We preprocess lists to add `listItem` tokens, and to infer whether // items the list itself are spread out. if (events[index][1].type === "listOrdered" || events[index][1].type === "listUnordered") { if (events[index][0] === 'enter') { listStack.push(index); } else { const tail = listStack.pop(); index = prepareList(events, tail, index); } } } index = -1; while (++index < events.length) { const handler = config[events[index][0]]; if (own$3.call(handler, events[index][1].type)) { handler[events[index][1].type].call(Object.assign({ sliceSerialize: events[index][2].sliceSerialize }, context), events[index][1]); } } // Handle tokens still being open. if (context.tokenStack.length > 0) { const tail = context.tokenStack[context.tokenStack.length - 1]; const handler = tail[1] || defaultOnError; handler.call(context, undefined, tail[0]); } // Figure out `root` position. tree.position = { start: point(events.length > 0 ? events[0][1].start : { line: 1, column: 1, offset: 0 }), end: point(events.length > 0 ? events[events.length - 2][1].end : { line: 1, column: 1, offset: 0 }) }; // Call transforms. index = -1; while (++index < config.transforms.length) { tree = config.transforms[index](tree) || tree; } return tree; } /** * @param {Array} events * @param {number} start * @param {number} length * @returns {number} */ function prepareList(events, start, length) { let index = start - 1; let containerBalance = -1; let listSpread = false; /** @type {Token | undefined} */ let listItem; /** @type {number | undefined} */ let lineIndex; /** @type {number | undefined} */ let firstBlankLineIndex; /** @type {boolean | undefined} */ let atMarker; while (++index <= length) { const event = events[index]; switch (event[1].type) { case "listUnordered": case "listOrdered": case "blockQuote": { if (event[0] === 'enter') { containerBalance++; } else { containerBalance--; } atMarker = undefined; break; } case "lineEndingBlank": { if (event[0] === 'enter') { if (listItem && !atMarker && !containerBalance && !firstBlankLineIndex) { firstBlankLineIndex = index; } atMarker = undefined; } break; } case "linePrefix": case "listItemValue": case "listItemMarker": case "listItemPrefix": case "listItemPrefixWhitespace": { // Empty. break; } default: { atMarker = undefined; } } if (!containerBalance && event[0] === 'enter' && event[1].type === "listItemPrefix" || containerBalance === -1 && event[0] === 'exit' && (event[1].type === "listUnordered" || event[1].type === "listOrdered")) { if (listItem) { let tailIndex = index; lineIndex = undefined; while (tailIndex--) { const tailEvent = events[tailIndex]; if (tailEvent[1].type === "lineEnding" || tailEvent[1].type === "lineEndingBlank") { if (tailEvent[0] === 'exit') continue; if (lineIndex) { events[lineIndex][1].type = "lineEndingBlank"; listSpread = true; } tailEvent[1].type = "lineEnding"; lineIndex = tailIndex; } else if (tailEvent[1].type === "linePrefix" || tailEvent[1].type === "blockQuotePrefix" || tailEvent[1].type === "blockQuotePrefixWhitespace" || tailEvent[1].type === "blockQuoteMarker" || tailEvent[1].type === "listItemIndent") ; else { break; } } if (firstBlankLineIndex && (!lineIndex || firstBlankLineIndex < lineIndex)) { listItem._spread = true; } // Fix position. 
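/*
 * In other words: close the synthetic `listItem` token at the start of the
 * trailing line ending when one was found (so blank lines after the item fall
 * outside it), otherwise at the end of the current event, then splice in the
 * matching 'exit' event and bump `index`/`length` to account for it.
 */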
listItem.end = Object.assign({}, lineIndex ? events[lineIndex][1].start : event[1].end); events.splice(lineIndex || index, 0, ['exit', listItem, event[2]]); index++; length++; } // Create a new list item. if (event[1].type === "listItemPrefix") { /** @type {Token} */ const item = { type: 'listItem', _spread: false, start: Object.assign({}, event[1].start), // @ts-expect-error: we’ll add `end` in a second. end: undefined }; listItem = item; events.splice(index, 0, ['enter', item, event[2]]); index++; length++; firstBlankLineIndex = undefined; atMarker = true; } } } events[start][1]._spread = listSpread; return length; } /** * Create an opener handle. * * @param {(token: Token) => Nodes} create * Create a node. * @param {Handle | undefined} [and] * Optional function to also run. * @returns {Handle} * Handle. */ function opener(create, and) { return open; /** * @this {CompileContext} * @param {Token} token * @returns {undefined} */ function open(token) { enter.call(this, create(token), token); if (and) and.call(this, token); } } /** * @this {CompileContext} * @returns {undefined} */ function buffer() { this.stack.push({ type: 'fragment', children: [] }); } /** * @this {CompileContext} * Context. * @param {Nodes} node * Node to enter. * @param {Token} token * Corresponding token. * @param {OnEnterError | undefined} [errorHandler] * Handle the case where this token is open, but it is closed by something else. * @returns {undefined} * Nothing. */ function enter(node, token, errorHandler) { const parent = this.stack[this.stack.length - 1]; /** @type {Array} */ const siblings = parent.children; siblings.push(node); this.stack.push(node); this.tokenStack.push([token, errorHandler]); node.position = { start: point(token.start), // @ts-expect-error: `end` will be patched later. end: undefined }; } /** * Create a closer handle. * * @param {Handle | undefined} [and] * Optional function to also run. * @returns {Handle} * Handle. */ function closer(and) { return close; /** * @this {CompileContext} * @param {Token} token * @returns {undefined} */ function close(token) { if (and) and.call(this, token); exit.call(this, token); } } /** * @this {CompileContext} * Context. * @param {Token} token * Corresponding token. * @param {OnExitError | undefined} [onExitError] * Handle the case where another token is open. * @returns {undefined} * Nothing. */ function exit(token, onExitError) { const node = this.stack.pop(); const open = this.tokenStack.pop(); if (!open) { throw new Error('Cannot close `' + token.type + '` (' + stringifyPosition({ start: token.start, end: token.end }) + '): it’s not open'); } else if (open[0].type !== token.type) { if (onExitError) { onExitError.call(this, token, open[0]); } else { const handler = open[1] || defaultOnError; handler.call(this, token, open[0]); } } node.position.end = point(token.end); } /** * @this {CompileContext} * @returns {string} */ function resume() { return toString$1(this.stack.pop()); } // // Handlers. 
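/*
 * Hedged sketch of how the handlers below cooperate (illustrative only):
 * `enter` pushes an mdast node plus the token that opened it, `exit` pops the
 * node, verifies the closing token matches, and patches `position.end`, while
 * `buffer`/`resume` push a 'fragment' node and later serialize it to a
 * string. A fenced code block, for example, roughly flows as:
 *
 *   // enter codeFenced          -> opener(codeFlow): push { type: 'code', ... }
 *   // enter codeFencedFenceInfo -> buffer(): push a fragment for the info string
 *   // exit codeFencedFenceInfo  -> onexitcodefencedfenceinfo: node.lang = resume()
 */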
// /** * @this {CompileContext} * @type {Handle} */ function onenterlistordered() { this.data.expectingFirstListItemValue = true; } /** * @this {CompileContext} * @type {Handle} */ function onenterlistitemvalue(token) { if (this.data.expectingFirstListItemValue) { const ancestor = this.stack[this.stack.length - 2]; ancestor.start = Number.parseInt(this.sliceSerialize(token), 10); this.data.expectingFirstListItemValue = undefined; } } /** * @this {CompileContext} * @type {Handle} */ function onexitcodefencedfenceinfo() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.lang = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitcodefencedfencemeta() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.meta = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitcodefencedfence() { // Exit if this is the closing fence. if (this.data.flowCodeInside) return; this.buffer(); this.data.flowCodeInside = true; } /** * @this {CompileContext} * @type {Handle} */ function onexitcodefenced() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.value = data.replace(/^(\r?\n|\r)|(\r?\n|\r)$/g, ''); this.data.flowCodeInside = undefined; } /** * @this {CompileContext} * @type {Handle} */ function onexitcodeindented() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.value = data.replace(/(\r?\n|\r)$/g, ''); } /** * @this {CompileContext} * @type {Handle} */ function onexitdefinitionlabelstring(token) { const label = this.resume(); const node = this.stack[this.stack.length - 1]; node.label = label; node.identifier = normalizeIdentifier(this.sliceSerialize(token)).toLowerCase(); } /** * @this {CompileContext} * @type {Handle} */ function onexitdefinitiontitlestring() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.title = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitdefinitiondestinationstring() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.url = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitatxheadingsequence(token) { const node = this.stack[this.stack.length - 1]; if (!node.depth) { const depth = this.sliceSerialize(token).length; node.depth = depth; } } /** * @this {CompileContext} * @type {Handle} */ function onexitsetextheadingtext() { this.data.setextHeadingSlurpLineEnding = true; } /** * @this {CompileContext} * @type {Handle} */ function onexitsetextheadinglinesequence(token) { const node = this.stack[this.stack.length - 1]; node.depth = this.sliceSerialize(token).codePointAt(0) === 61 ? 1 : 2; } /** * @this {CompileContext} * @type {Handle} */ function onexitsetextheading() { this.data.setextHeadingSlurpLineEnding = undefined; } /** * @this {CompileContext} * @type {Handle} */ function onenterdata(token) { const node = this.stack[this.stack.length - 1]; /** @type {Array} */ const siblings = node.children; let tail = siblings[siblings.length - 1]; if (!tail || tail.type !== 'text') { // Add a new text node. tail = text(); tail.position = { start: point(token.start), // @ts-expect-error: we’ll add `end` later. 
end: undefined }; siblings.push(tail); } this.stack.push(tail); } /** * @this {CompileContext} * @type {Handle} */ function onexitdata(token) { const tail = this.stack.pop(); tail.value += this.sliceSerialize(token); tail.position.end = point(token.end); } /** * @this {CompileContext} * @type {Handle} */ function onexitlineending(token) { const context = this.stack[this.stack.length - 1]; // If we’re at a hard break, include the line ending in there. if (this.data.atHardBreak) { const tail = context.children[context.children.length - 1]; tail.position.end = point(token.end); this.data.atHardBreak = undefined; return; } if (!this.data.setextHeadingSlurpLineEnding && config.canContainEols.includes(context.type)) { onenterdata.call(this, token); onexitdata.call(this, token); } } /** * @this {CompileContext} * @type {Handle} */ function onexithardbreak() { this.data.atHardBreak = true; } /** * @this {CompileContext} * @type {Handle} */ function onexithtmlflow() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.value = data; } /** * @this {CompileContext} * @type {Handle} */ function onexithtmltext() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.value = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitcodetext() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.value = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitlink() { const node = this.stack[this.stack.length - 1]; // Note: there are also `identifier` and `label` fields on this link node! // These are used / cleaned here. // To do: clean. if (this.data.inReference) { /** @type {ReferenceType} */ const referenceType = this.data.referenceType || 'shortcut'; node.type += 'Reference'; // @ts-expect-error: mutate. node.referenceType = referenceType; // @ts-expect-error: mutate. delete node.url; delete node.title; } else { // @ts-expect-error: mutate. delete node.identifier; // @ts-expect-error: mutate. delete node.label; } this.data.referenceType = undefined; } /** * @this {CompileContext} * @type {Handle} */ function onexitimage() { const node = this.stack[this.stack.length - 1]; // Note: there are also `identifier` and `label` fields on this link node! // These are used / cleaned here. // To do: clean. if (this.data.inReference) { /** @type {ReferenceType} */ const referenceType = this.data.referenceType || 'shortcut'; node.type += 'Reference'; // @ts-expect-error: mutate. node.referenceType = referenceType; // @ts-expect-error: mutate. delete node.url; delete node.title; } else { // @ts-expect-error: mutate. delete node.identifier; // @ts-expect-error: mutate. delete node.label; } this.data.referenceType = undefined; } /** * @this {CompileContext} * @type {Handle} */ function onexitlabeltext(token) { const string = this.sliceSerialize(token); const ancestor = this.stack[this.stack.length - 2]; // @ts-expect-error: stash this on the node, as it might become a reference // later. ancestor.label = decodeString(string); // @ts-expect-error: same as above. ancestor.identifier = normalizeIdentifier(string).toLowerCase(); } /** * @this {CompileContext} * @type {Handle} */ function onexitlabel() { const fragment = this.stack[this.stack.length - 1]; const value = this.resume(); const node = this.stack[this.stack.length - 1]; // Assume a reference. 
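/*
 * The label is provisionally treated as a reference here: if a resource
 * (`(url "title")`) follows, `onexitresource` clears `inReference` again, and
 * `onexitlink`/`onexitimage` use the flag to decide between a plain
 * link/image and a *Reference node.
 */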
this.data.inReference = true; if (node.type === 'link') { /** @type {Array} */ const children = fragment.children; node.children = children; } else { node.alt = value; } } /** * @this {CompileContext} * @type {Handle} */ function onexitresourcedestinationstring() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.url = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitresourcetitlestring() { const data = this.resume(); const node = this.stack[this.stack.length - 1]; node.title = data; } /** * @this {CompileContext} * @type {Handle} */ function onexitresource() { this.data.inReference = undefined; } /** * @this {CompileContext} * @type {Handle} */ function onenterreference() { this.data.referenceType = 'collapsed'; } /** * @this {CompileContext} * @type {Handle} */ function onexitreferencestring(token) { const label = this.resume(); const node = this.stack[this.stack.length - 1]; // @ts-expect-error: stash this on the node, as it might become a reference // later. node.label = label; // @ts-expect-error: same as above. node.identifier = normalizeIdentifier(this.sliceSerialize(token)).toLowerCase(); this.data.referenceType = 'full'; } /** * @this {CompileContext} * @type {Handle} */ function onexitcharacterreferencemarker(token) { this.data.characterReferenceType = token.type; } /** * @this {CompileContext} * @type {Handle} */ function onexitcharacterreferencevalue(token) { const data = this.sliceSerialize(token); const type = this.data.characterReferenceType; /** @type {string} */ let value; if (type) { value = decodeNumericCharacterReference(data, type === "characterReferenceMarkerNumeric" ? 10 : 16); this.data.characterReferenceType = undefined; } else { const result = decodeNamedCharacterReference(data); value = result; } const tail = this.stack[this.stack.length - 1]; tail.value += value; } /** * @this {CompileContext} * @type {Handle} */ function onexitcharacterreference(token) { const tail = this.stack.pop(); tail.position.end = point(token.end); } /** * @this {CompileContext} * @type {Handle} */ function onexitautolinkprotocol(token) { onexitdata.call(this, token); const node = this.stack[this.stack.length - 1]; node.url = this.sliceSerialize(token); } /** * @this {CompileContext} * @type {Handle} */ function onexitautolinkemail(token) { onexitdata.call(this, token); const node = this.stack[this.stack.length - 1]; node.url = 'mailto:' + this.sliceSerialize(token); } // // Creaters. // /** @returns {Blockquote} */ function blockQuote() { return { type: 'blockquote', children: [] }; } /** @returns {Code} */ function codeFlow() { return { type: 'code', lang: null, meta: null, value: '' }; } /** @returns {InlineCode} */ function codeText() { return { type: 'inlineCode', value: '' }; } /** @returns {Definition} */ function definition() { return { type: 'definition', identifier: '', label: null, title: null, url: '' }; } /** @returns {Emphasis} */ function emphasis() { return { type: 'emphasis', children: [] }; } /** @returns {Heading} */ function heading() { return { type: 'heading', // @ts-expect-error `depth` will be set later. 
depth: 0, children: [] }; } /** @returns {Break} */ function hardBreak() { return { type: 'break' }; } /** @returns {Html} */ function html() { return { type: 'html', value: '' }; } /** @returns {Image} */ function image() { return { type: 'image', title: null, url: '', alt: null }; } /** @returns {Link} */ function link() { return { type: 'link', title: null, url: '', children: [] }; } /** * @param {Token} token * @returns {List} */ function list(token) { return { type: 'list', ordered: token.type === 'listOrdered', start: null, spread: token._spread, children: [] }; } /** * @param {Token} token * @returns {ListItem} */ function listItem(token) { return { type: 'listItem', spread: token._spread, checked: null, children: [] }; } /** @returns {Paragraph} */ function paragraph() { return { type: 'paragraph', children: [] }; } /** @returns {Strong} */ function strong() { return { type: 'strong', children: [] }; } /** @returns {Text} */ function text() { return { type: 'text', value: '' }; } /** @returns {ThematicBreak} */ function thematicBreak() { return { type: 'thematicBreak' }; } } /** * Copy a point-like value. * * @param {Point} d * Point-like value. * @returns {Point} * unist point. */ function point(d) { return { line: d.line, column: d.column, offset: d.offset }; } /** * @param {Config} combined * @param {Array | Extension>} extensions * @returns {undefined} */ function configure$1(combined, extensions) { let index = -1; while (++index < extensions.length) { const value = extensions[index]; if (Array.isArray(value)) { configure$1(combined, value); } else { extension(combined, value); } } } /** * @param {Config} combined * @param {Extension} extension * @returns {undefined} */ function extension(combined, extension) { /** @type {keyof Extension} */ let key; for (key in extension) { if (own$3.call(extension, key)) { switch (key) { case 'canContainEols': { const right = extension[key]; if (right) { combined[key].push(...right); } break; } case 'transforms': { const right = extension[key]; if (right) { combined[key].push(...right); } break; } case 'enter': case 'exit': { const right = extension[key]; if (right) { Object.assign(combined[key], right); } break; } // No default } } } } /** @type {OnEnterError} */ function defaultOnError(left, right) { if (left) { throw new Error('Cannot close `' + left.type + '` (' + stringifyPosition({ start: left.start, end: left.end }) + '): a different token (`' + right.type + '`, ' + stringifyPosition({ start: right.start, end: right.end }) + ') is open'); } else { throw new Error('Cannot close document, a token (`' + right.type + '`, ' + stringifyPosition({ start: right.start, end: right.end }) + ') is still open'); } } /** * @typedef {import('mdast').Root} Root * @typedef {import('mdast-util-from-markdown').Options} FromMarkdownOptions * @typedef {import('unified').Parser} Parser * @typedef {import('unified').Processor} Processor */ /** * Aadd support for parsing from markdown. * * @param {Readonly | null | undefined} [options] * Configuration (optional). * @returns {undefined} * Nothing. */ function remarkParse(options) { /** @type {Processor} */ // @ts-expect-error: TS in JSDoc generates wrong types if `this` is typed regularly. const self = this; self.parser = parser; /** * @type {Parser} */ function parser(doc) { return fromMarkdown(doc, { ...self.data('settings'), ...options, // Note: these options are not in the readme. // The goal is for them to be set by plugins on `data` instead of being // passed by users. 
extensions: self.data('micromarkExtensions') || [], mdastExtensions: self.data('fromMarkdownExtensions') || [] }) } } /** * @callback Handler * Handle a value, with a certain ID field set to a certain value. * The ID field is passed to `zwitch`, and it’s value is this function’s * place on the `handlers` record. * @param {...any} parameters * Arbitrary parameters passed to the zwitch. * The first will be an object with a certain ID field set to a certain value. * @returns {any} * Anything! */ /** * @callback UnknownHandler * Handle values that do have a certain ID field, but it’s set to a value * that is not listed in the `handlers` record. * @param {unknown} value * An object with a certain ID field set to an unknown value. * @param {...any} rest * Arbitrary parameters passed to the zwitch. * @returns {any} * Anything! */ /** * @callback InvalidHandler * Handle values that do not have a certain ID field. * @param {unknown} value * Any unknown value. * @param {...any} rest * Arbitrary parameters passed to the zwitch. * @returns {void|null|undefined|never} * This should crash or return nothing. */ /** * @template {InvalidHandler} [Invalid=InvalidHandler] * @template {UnknownHandler} [Unknown=UnknownHandler] * @template {Record} [Handlers=Record] * @typedef Options * Configuration (required). * @property {Invalid} [invalid] * Handler to use for invalid values. * @property {Unknown} [unknown] * Handler to use for unknown values. * @property {Handlers} [handlers] * Handlers to use. */ const own$2 = {}.hasOwnProperty; /** * Handle values based on a field. * * @template {InvalidHandler} [Invalid=InvalidHandler] * @template {UnknownHandler} [Unknown=UnknownHandler] * @template {Record} [Handlers=Record] * @param {string} key * Field to switch on. * @param {Options} [options] * Configuration (required). * @returns {{unknown: Unknown, invalid: Invalid, handlers: Handlers, (...parameters: Parameters): ReturnType, (...parameters: Parameters): ReturnType}} */ function zwitch(key, options) { const settings = options || {}; /** * Handle one value. * * Based on the bound `key`, a respective handler will be called. * If `value` is not an object, or doesn’t have a `key` property, the special * “invalid” handler will be called. * If `value` has an unknown `key`, the special “unknown” handler will be * called. * * All arguments, and the context object, are passed through to the handler, * and it’s result is returned. * * @this {unknown} * Any context object. * @param {unknown} [value] * Any value. * @param {...unknown} parameters * Arbitrary parameters passed to the zwitch. * @property {Handler} invalid * Handle for values that do not have a certain ID field. * @property {Handler} unknown * Handle values that do have a certain ID field, but it’s set to a value * that is not listed in the `handlers` record. * @property {Handlers} handlers * Record of handlers. * @returns {unknown} * Anything. */ function one(value, ...parameters) { /** @type {Handler|undefined} */ let fn = one.invalid; const handlers = one.handlers; if (value && own$2.call(value, key)) { // @ts-expect-error Indexable. const id = String(value[key]); // @ts-expect-error Indexable. fn = own$2.call(handlers, id) ? handlers[id] : one.unknown; } if (fn) { return fn.call(this, value, ...parameters) } } one.handlers = settings.handlers || {}; one.invalid = settings.invalid; one.unknown = settings.unknown; // @ts-expect-error: matches! 
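/*
 * Hedged usage sketch for `zwitch` (the names below are made up for the
 * example):
 *
 *   // const handle = zwitch('type', {
 *   //   invalid(value) { throw new Error('Expected a node, got ' + value) },
 *   //   unknown(node) { throw new Error('Unknown node type: ' + node.type) },
 *   //   handlers: { text(node) { return node.value } }
 *   // })
 *   // handle({ type: 'text', value: 'hi' }) // => 'hi'
 */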
return one } /** * @typedef {import('./types.js').Options} Options * @typedef {import('./types.js').State} State */ const own = {}.hasOwnProperty; /** * @param {State} base * @param {Options} extension * @returns {State} */ function configure(base, extension) { let index = -1; /** @type {keyof Options} */ let key; // First do subextensions. if (extension.extensions) { while (++index < extension.extensions.length) { configure(base, extension.extensions[index]); } } for (key in extension) { if (own.call(extension, key)) { switch (key) { case 'extensions': { // Empty. break } /* c8 ignore next 4 */ case 'unsafe': { list$2(base[key], extension[key]); break } case 'join': { list$2(base[key], extension[key]); break } case 'handlers': { map$2(base[key], extension[key]); break } default: { // @ts-expect-error: matches. base.options[key] = extension[key]; } } } } return base } /** * @template T * @param {Array} left * @param {Array | null | undefined} right */ function list$2(left, right) { if (right) { left.push(...right); } } /** * @template T * @param {Record} left * @param {Record | null | undefined} right */ function map$2(left, right) { if (right) { Object.assign(left, right); } } /** * @typedef {import('mdast').Blockquote} Blockquote * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').Map} Map * @typedef {import('../types.js').State} State */ /** * @param {Blockquote} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function blockquote(node, _, state, info) { const exit = state.enter('blockquote'); const tracker = state.createTracker(info); tracker.move('> '); tracker.shift(2); const value = state.indentLines( state.containerFlow(node, tracker.current()), map$1 ); exit(); return value } /** @type {Map} */ function map$1(line, _, blank) { return '>' + (blank ? '' : ' ') + line } /** * @typedef {import('../types.js').ConstructName} ConstructName * @typedef {import('../types.js').Unsafe} Unsafe */ /** * @param {Array} stack * @param {Unsafe} pattern * @returns {boolean} */ function patternInScope(stack, pattern) { return ( listInScope(stack, pattern.inConstruct, true) && !listInScope(stack, pattern.notInConstruct, false) ) } /** * @param {Array} stack * @param {Unsafe['inConstruct']} list * @param {boolean} none * @returns {boolean} */ function listInScope(stack, list, none) { if (typeof list === 'string') { list = [list]; } if (!list || list.length === 0) { return none } let index = -1; while (++index < list.length) { if (stack.includes(list[index])) { return true } } return false } /** * @typedef {import('mdast').Break} Break * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {Break} _ * @param {Parents | undefined} _1 * @param {State} state * @param {Info} info * @returns {string} */ function hardBreak(_, _1, state, info) { let index = -1; while (++index < state.unsafe.length) { // If we can’t put eols in this construct (setext headings, tables), use a // space instead. if ( state.unsafe[index].character === '\n' && patternInScope(state.stack, state.unsafe[index]) ) { return /[ \t]/.test(info.before) ? '' : ' ' } } return '\\\n' } /** * Get the count of the longest repeating streak of `substring` in `value`. * * @param {string} value * Content to search in. * @param {string} substring * Substring to look for, typically one character. 
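*
* Worked example (illustrative): `longestStreak('a``b```c', '`')` is 3; the
* fenced-code serializer below uses this count to pick a fence one marker
* longer than any run inside the value, and at least three markers long.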
* @returns {number} * Count of most frequent adjacent `substring`s in `value`. */ function longestStreak(value, substring) { const source = String(value); let index = source.indexOf(substring); let expected = index; let count = 0; let max = 0; if (typeof substring !== 'string') { throw new TypeError('Expected substring') } while (index !== -1) { if (index === expected) { if (++count > max) { max = count; } } else { count = 1; } expected = index + substring.length; index = source.indexOf(substring, expected); } return max } /** * @typedef {import('mdast').Code} Code * @typedef {import('../types.js').State} State */ /** * @param {Code} node * @param {State} state * @returns {boolean} */ function formatCodeAsIndented(node, state) { return Boolean( state.options.fences === false && node.value && // If there’s no info… !node.lang && // And there’s a non-whitespace character… /[^ \r\n]/.test(node.value) && // And the value doesn’t start or end in a blank… !/^[\t ]*(?:[\r\n]|$)|(?:^|[\r\n])[\t ]*$/.test(node.value) ) } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkFence(state) { const marker = state.options.fence || '`'; if (marker !== '`' && marker !== '~') { throw new Error( 'Cannot serialize code with `' + marker + '` for `options.fence`, expected `` ` `` or `~`' ) } return marker } /** * @typedef {import('mdast').Code} Code * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').Map} Map * @typedef {import('../types.js').State} State */ /** * @param {Code} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function code(node, _, state, info) { const marker = checkFence(state); const raw = node.value || ''; const suffix = marker === '`' ? 'GraveAccent' : 'Tilde'; if (formatCodeAsIndented(node, state)) { const exit = state.enter('codeIndented'); const value = state.indentLines(raw, map); exit(); return value } const tracker = state.createTracker(info); const sequence = marker.repeat(Math.max(longestStreak(raw, marker) + 1, 3)); const exit = state.enter('codeFenced'); let value = tracker.move(sequence); if (node.lang) { const subexit = state.enter(`codeFencedLang${suffix}`); value += tracker.move( state.safe(node.lang, { before: value, after: ' ', encode: ['`'], ...tracker.current() }) ); subexit(); } if (node.lang && node.meta) { const subexit = state.enter(`codeFencedMeta${suffix}`); value += tracker.move(' '); value += tracker.move( state.safe(node.meta, { before: value, after: '\n', encode: ['`'], ...tracker.current() }) ); subexit(); } value += tracker.move('\n'); if (raw) { value += tracker.move(raw + '\n'); } value += tracker.move(sequence); exit(); return value } /** @type {Map} */ function map(line, _, blank) { return (blank ? 
'' : ' ') + line } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkQuote(state) { const marker = state.options.quote || '"'; if (marker !== '"' && marker !== "'") { throw new Error( 'Cannot serialize title with `' + marker + '` for `options.quote`, expected `"`, or `\'`' ) } return marker } /** * @typedef {import('mdast').Definition} Definition * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {Definition} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function definition(node, _, state, info) { const quote = checkQuote(state); const suffix = quote === '"' ? 'Quote' : 'Apostrophe'; const exit = state.enter('definition'); let subexit = state.enter('label'); const tracker = state.createTracker(info); let value = tracker.move('['); value += tracker.move( state.safe(state.associationId(node), { before: value, after: ']', ...tracker.current() }) ); value += tracker.move(']: '); subexit(); if ( // If there’s no url, or… !node.url || // If there are control characters or whitespace. /[\0- \u007F]/.test(node.url) ) { subexit = state.enter('destinationLiteral'); value += tracker.move('<'); value += tracker.move( state.safe(node.url, {before: value, after: '>', ...tracker.current()}) ); value += tracker.move('>'); } else { // No whitespace, raw is prettier. subexit = state.enter('destinationRaw'); value += tracker.move( state.safe(node.url, { before: value, after: node.title ? ' ' : '\n', ...tracker.current() }) ); } subexit(); if (node.title) { subexit = state.enter(`title${suffix}`); value += tracker.move(' ' + quote); value += tracker.move( state.safe(node.title, { before: value, after: quote, ...tracker.current() }) ); value += tracker.move(quote); subexit(); } exit(); return value } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkEmphasis(state) { const marker = state.options.emphasis || '*'; if (marker !== '*' && marker !== '_') { throw new Error( 'Cannot serialize emphasis with `' + marker + '` for `options.emphasis`, expected `*`, or `_`' ) } return marker } /** * @typedef {import('mdast').Emphasis} Emphasis * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ emphasis.peek = emphasisPeek; // To do: there are cases where emphasis cannot “form” depending on the // previous or next character of sequences. // There’s no way around that though, except for injecting zero-width stuff. // Do we need to safeguard against that? 
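// Illustrative usage sketch (an assumption about how these handlers are meant
// to be driven, not an excerpt of this bundle): `checkEmphasis` above accepts
// only `*` or `_`, and the `emphasis` handler below wraps its phrasing content
// in one marker on each side. With `toMarkdown` (defined further down):
//
//   toMarkdown({type: 'emphasis', children: [{type: 'text', value: 'hello'}]});
//   // should yield '*hello*\n'
//   toMarkdown({type: 'emphasis', children: [{type: 'text', value: 'hello'}]}, {emphasis: '_'});
//   // should yield '_hello_\n'
//   toMarkdown({type: 'emphasis', children: []}, {emphasis: '+'});
//   // throws: Cannot serialize emphasis with `+` for `options.emphasis`, expected `*`, or `_`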
/** * @param {Emphasis} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function emphasis(node, _, state, info) { const marker = checkEmphasis(state); const exit = state.enter('emphasis'); const tracker = state.createTracker(info); let value = tracker.move(marker); value += tracker.move( state.containerPhrasing(node, { before: value, after: marker, ...tracker.current() }) ); value += tracker.move(marker); exit(); return value } /** * @param {Emphasis} _ * @param {Parents | undefined} _1 * @param {State} state * @returns {string} */ function emphasisPeek(_, _1, state) { return state.options.emphasis || '*' } /** * @typedef {import('unist').Node} Node * @typedef {import('unist').Parent} Parent */ /** * Generate an assertion from a test. * * Useful if you’re going to test many nodes, for example when creating a * utility where something else passes a compatible test. * * The created function is a bit faster because it expects valid input only: * a `node`, `index`, and `parent`. * * @param {Test} test * * when nullish, checks if `node` is a `Node`. * * when `string`, works like passing `(node) => node.type === test`. * * when `function` checks if function passed the node is true. * * when `object`, checks that all keys in test are in node, and that they have (strictly) equal values. * * when `array`, checks if any one of the subtests pass. * @returns {Check} * An assertion. */ const convert = // Note: overloads in JSDoc can’t yet use different `@template`s. /** * @type {( * ((test: Condition) => (node: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & {type: Condition}) & * ((test: Condition) => (node: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & Condition) & * ((test: Condition) => (node: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & Predicate) & * ((test?: null | undefined) => (node?: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node) & * ((test?: Test) => Check) * )} */ ( /** * @param {Test} [test] * @returns {Check} */ function (test) { if (test === null || test === undefined) { return ok } if (typeof test === 'function') { return castFactory(test) } if (typeof test === 'object') { return Array.isArray(test) ? anyFactory(test) : propsFactory(test) } if (typeof test === 'string') { return typeFactory(test) } throw new Error('Expected function, string, or object as test') } ); /** * @param {Array} tests * @returns {Check} */ function anyFactory(tests) { /** @type {Array} */ const checks = []; let index = -1; while (++index < tests.length) { checks[index] = convert(tests[index]); } return castFactory(any) /** * @this {unknown} * @type {TestFunction} */ function any(...parameters) { let index = -1; while (++index < checks.length) { if (checks[index].apply(this, parameters)) return true } return false } } /** * Turn an object into a test for a node with a certain fields. 
* * @param {Props} check * @returns {Check} */ function propsFactory(check) { const checkAsRecord = /** @type {Record} */ (check); return castFactory(all) /** * @param {Node} node * @returns {boolean} */ function all(node) { const nodeAsRecord = /** @type {Record} */ ( /** @type {unknown} */ (node) ); /** @type {string} */ let key; for (key in check) { if (nodeAsRecord[key] !== checkAsRecord[key]) return false } return true } } /** * Turn a string into a test for a node with a certain type. * * @param {string} check * @returns {Check} */ function typeFactory(check) { return castFactory(type) /** * @param {Node} node */ function type(node) { return node && node.type === check } } /** * Turn a custom test into a test for a node that passes that test. * * @param {TestFunction} testFunction * @returns {Check} */ function castFactory(testFunction) { return check /** * @this {unknown} * @type {Check} */ function check(value, index, parent) { return Boolean( looksLikeANode(value) && testFunction.call( this, value, typeof index === 'number' ? index : undefined, parent || undefined ) ) } } function ok() { return true } /** * @param {unknown} value * @returns {value is Node} */ function looksLikeANode(value) { return value !== null && typeof value === 'object' && 'type' in value } /** * @param {string} d * @returns {string} */ function color(d) { return '\u001B[33m' + d + '\u001B[39m' } /** * @typedef {import('unist').Node} UnistNode * @typedef {import('unist').Parent} UnistParent */ /** @type {Readonly} */ const empty$2 = []; /** * Continue traversing as normal. */ const CONTINUE = true; /** * Stop traversing immediately. */ const EXIT = false; /** * Do not traverse this node’s children. */ const SKIP = 'skip'; /** * Visit nodes, with ancestral information. * * This algorithm performs *depth-first* *tree traversal* in *preorder* * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**). * * You can choose for which nodes `visitor` is called by passing a `test`. * For complex tests, you should test yourself in `visitor`, as it will be * faster and will have improved type information. * * Walking the tree is an intensive task. * Make use of the return values of the visitor when possible. * Instead of walking a tree multiple times, walk it once, use `unist-util-is` * to check if a node matches, and then perform different operations. * * You can change the tree. * See `Visitor` for more info. * * @overload * @param {Tree} tree * @param {Check} check * @param {BuildVisitor} visitor * @param {boolean | null | undefined} [reverse] * @returns {undefined} * * @overload * @param {Tree} tree * @param {BuildVisitor} visitor * @param {boolean | null | undefined} [reverse] * @returns {undefined} * * @param {UnistNode} tree * Tree to traverse. * @param {Visitor | Test} test * `unist-util-is`-compatible test * @param {Visitor | boolean | null | undefined} [visitor] * Handle each node. * @param {boolean | null | undefined} [reverse] * Traverse in reverse preorder (NRL) instead of the default preorder (NLR). * @returns {undefined} * Nothing. * * @template {UnistNode} Tree * Node type. * @template {Test} Check * `unist-util-is`-compatible test. */ function visitParents(tree, test, visitor, reverse) { /** @type {Test} */ let check; if (typeof test === 'function' && typeof visitor !== 'function') { reverse = visitor; // @ts-expect-error no visitor given, so `visitor` is test. visitor = test; } else { // @ts-expect-error visitor given, so `test` isn’t a visitor. 
check = test; } const is = convert(check); const step = reverse ? -1 : 1; factory(tree, undefined, [])(); /** * @param {UnistNode} node * @param {number | undefined} index * @param {Array} parents */ function factory(node, index, parents) { const value = /** @type {Record} */ ( node && typeof node === 'object' ? node : {} ); if (typeof value.type === 'string') { const name = // `hast` typeof value.tagName === 'string' ? value.tagName : // `xast` typeof value.name === 'string' ? value.name : undefined; Object.defineProperty(visit, 'name', { value: 'node (' + color(node.type + (name ? '<' + name + '>' : '')) + ')' }); } return visit function visit() { /** @type {Readonly} */ let result = empty$2; /** @type {Readonly} */ let subresult; /** @type {number} */ let offset; /** @type {Array} */ let grandparents; if (!test || is(node, index, parents[parents.length - 1] || undefined)) { // @ts-expect-error: `visitor` is now a visitor. result = toResult(visitor(node, parents)); if (result[0] === EXIT) { return result } } if ('children' in node && node.children) { const nodeAsParent = /** @type {UnistParent} */ (node); if (nodeAsParent.children && result[0] !== SKIP) { offset = (reverse ? nodeAsParent.children.length : -1) + step; grandparents = parents.concat(nodeAsParent); while (offset > -1 && offset < nodeAsParent.children.length) { const child = nodeAsParent.children[offset]; subresult = factory(child, offset, grandparents)(); if (subresult[0] === EXIT) { return subresult } offset = typeof subresult[1] === 'number' ? subresult[1] : offset + step; } } } return result } } } /** * Turn a return value into a clean result. * * @param {VisitorResult} value * Valid return values from visitors. * @returns {Readonly} * Clean result. */ function toResult(value) { if (Array.isArray(value)) { return value } if (typeof value === 'number') { return [CONTINUE, value] } return value === null || value === undefined ? empty$2 : [value] } /** * @typedef {import('unist').Node} UnistNode * @typedef {import('unist').Parent} UnistParent * @typedef {import('unist-util-visit-parents').VisitorResult} VisitorResult */ /** * Visit nodes. * * This algorithm performs *depth-first* *tree traversal* in *preorder* * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**). * * You can choose for which nodes `visitor` is called by passing a `test`. * For complex tests, you should test yourself in `visitor`, as it will be * faster and will have improved type information. * * Walking the tree is an intensive task. * Make use of the return values of the visitor when possible. * Instead of walking a tree multiple times, walk it once, use `unist-util-is` * to check if a node matches, and then perform different operations. * * You can change the tree. * See `Visitor` for more info. * * @overload * @param {Tree} tree * @param {Check} check * @param {BuildVisitor} visitor * @param {boolean | null | undefined} [reverse] * @returns {undefined} * * @overload * @param {Tree} tree * @param {BuildVisitor} visitor * @param {boolean | null | undefined} [reverse] * @returns {undefined} * * @param {UnistNode} tree * Tree to traverse. * @param {Visitor | Test} testOrVisitor * `unist-util-is`-compatible test (optional, omit to pass a visitor). * @param {Visitor | boolean | null | undefined} [visitorOrReverse] * Handle each node (when test is omitted, pass `reverse`). * @param {boolean | null | undefined} [maybeReverse=false] * Traverse in reverse preorder (NRL) instead of the default preorder (NLR). * @returns {undefined} * Nothing. 
* * @template {UnistNode} Tree * Node type. * @template {Test} Check * `unist-util-is`-compatible test. */ function visit(tree, testOrVisitor, visitorOrReverse, maybeReverse) { /** @type {boolean | null | undefined} */ let reverse; /** @type {Test} */ let test; /** @type {Visitor} */ let visitor; if ( typeof testOrVisitor === 'function' && typeof visitorOrReverse !== 'function' ) { test = undefined; visitor = testOrVisitor; reverse = visitorOrReverse; } else { // @ts-expect-error: assume the overload with test was given. test = testOrVisitor; // @ts-expect-error: assume the overload with test was given. visitor = visitorOrReverse; reverse = maybeReverse; } visitParents(tree, test, overload, reverse); /** * @param {UnistNode} node * @param {Array} parents */ function overload(node, parents) { const parent = parents[parents.length - 1]; const index = parent ? parent.children.indexOf(node) : undefined; return visitor(node, index, parent) } } /** * @typedef {import('mdast').Heading} Heading * @typedef {import('../types.js').State} State */ /** * @param {Heading} node * @param {State} state * @returns {boolean} */ function formatHeadingAsSetext(node, state) { let literalWithBreak = false; // Look for literals with a line break. // Note that this also visit(node, function (node) { if ( ('value' in node && /\r?\n|\r/.test(node.value)) || node.type === 'break' ) { literalWithBreak = true; return EXIT } }); return Boolean( (!node.depth || node.depth < 3) && toString$1(node) && (state.options.setext || literalWithBreak) ) } /** * @typedef {import('mdast').Heading} Heading * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {Heading} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function heading(node, _, state, info) { const rank = Math.max(Math.min(6, node.depth || 1), 1); const tracker = state.createTracker(info); if (formatHeadingAsSetext(node, state)) { const exit = state.enter('headingSetext'); const subexit = state.enter('phrasing'); const value = state.containerPhrasing(node, { ...tracker.current(), before: '\n', after: '\n' }); subexit(); exit(); return ( value + '\n' + (rank === 1 ? '=' : '-').repeat( // The whole size… value.length - // Minus the position of the character after the last EOL (or // 0 if there is none)… (Math.max(value.lastIndexOf('\r'), value.lastIndexOf('\n')) + 1) ) ) } const sequence = '#'.repeat(rank); const exit = state.enter('headingAtx'); const subexit = state.enter('phrasing'); // Note: for proper tracking, we should reset the output positions when there // is no content returned, because then the space is not output. // Practically, in that case, there is no content, so it doesn’t matter that // we’ve tracked one too many characters. tracker.move(sequence + ' '); let value = state.containerPhrasing(node, { before: '# ', after: '\n', ...tracker.current() }); if (/^[\t ]/.test(value)) { // To do: what effect has the character reference on tracking? value = '&#x' + value.charCodeAt(0).toString(16).toUpperCase() + ';' + value.slice(1); } value = value ? 
sequence + ' ' + value : sequence; if (state.options.closeAtx) { value += ' ' + sequence; } subexit(); exit(); return value } /** * @typedef {import('mdast').Html} Html */ html.peek = htmlPeek; /** * @param {Html} node * @returns {string} */ function html(node) { return node.value || '' } /** * @returns {string} */ function htmlPeek() { return '<' } /** * @typedef {import('mdast').Image} Image * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ image.peek = imagePeek; /** * @param {Image} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function image(node, _, state, info) { const quote = checkQuote(state); const suffix = quote === '"' ? 'Quote' : 'Apostrophe'; const exit = state.enter('image'); let subexit = state.enter('label'); const tracker = state.createTracker(info); let value = tracker.move('!['); value += tracker.move( state.safe(node.alt, {before: value, after: ']', ...tracker.current()}) ); value += tracker.move(']('); subexit(); if ( // If there’s no url but there is a title… (!node.url && node.title) || // If there are control characters or whitespace. /[\0- \u007F]/.test(node.url) ) { subexit = state.enter('destinationLiteral'); value += tracker.move('<'); value += tracker.move( state.safe(node.url, {before: value, after: '>', ...tracker.current()}) ); value += tracker.move('>'); } else { // No whitespace, raw is prettier. subexit = state.enter('destinationRaw'); value += tracker.move( state.safe(node.url, { before: value, after: node.title ? ' ' : ')', ...tracker.current() }) ); } subexit(); if (node.title) { subexit = state.enter(`title${suffix}`); value += tracker.move(' ' + quote); value += tracker.move( state.safe(node.title, { before: value, after: quote, ...tracker.current() }) ); value += tracker.move(quote); subexit(); } value += tracker.move(')'); exit(); return value } /** * @returns {string} */ function imagePeek() { return '!' } /** * @typedef {import('mdast').ImageReference} ImageReference * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ imageReference.peek = imageReferencePeek; /** * @param {ImageReference} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function imageReference(node, _, state, info) { const type = node.referenceType; const exit = state.enter('imageReference'); let subexit = state.enter('label'); const tracker = state.createTracker(info); let value = tracker.move('!['); const alt = state.safe(node.alt, { before: value, after: ']', ...tracker.current() }); value += tracker.move(alt + ']['); subexit(); // Hide the fact that we’re in phrasing, because escapes don’t work. const stack = state.stack; state.stack = []; subexit = state.enter('reference'); // Note: for proper tracking, we should reset the output positions when we end // up making a `shortcut` reference, because then there is no brace output. // Practically, in that case, there is no content, so it doesn’t matter that // we’ve tracked one too many characters. const reference = state.safe(state.associationId(node), { before: value, after: ']', ...tracker.current() }); subexit(); state.stack = stack; exit(); if (type === 'full' || !alt || alt !== reference) { value += tracker.move(reference + ']'); } else if (type === 'shortcut') { // Remove the unwanted `[`. 
value = value.slice(0, -1); } else { value += tracker.move(']'); } return value } /** * @returns {string} */ function imageReferencePeek() { return '!' } /** * @typedef {import('mdast').InlineCode} InlineCode * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').State} State */ inlineCode.peek = inlineCodePeek; /** * @param {InlineCode} node * @param {Parents | undefined} _ * @param {State} state * @returns {string} */ function inlineCode(node, _, state) { let value = node.value || ''; let sequence = '`'; let index = -1; // If there is a single grave accent on its own in the code, use a fence of // two. // If there are two in a row, use one. while (new RegExp('(^|[^`])' + sequence + '([^`]|$)').test(value)) { sequence += '`'; } // If this is not just spaces or eols (tabs don’t count), and either the // first or last character are a space, eol, or tick, then pad with spaces. if ( /[^ \r\n]/.test(value) && ((/^[ \r\n]/.test(value) && /[ \r\n]$/.test(value)) || /^`|`$/.test(value)) ) { value = ' ' + value + ' '; } // We have a potential problem: certain characters after eols could result in // blocks being seen. // For example, if someone injected the string `'\n# b'`, then that would // result in an ATX heading. // We can’t escape characters in `inlineCode`, but because eols are // transformed to spaces when going from markdown to HTML anyway, we can swap // them out. while (++index < state.unsafe.length) { const pattern = state.unsafe[index]; const expression = state.compilePattern(pattern); /** @type {RegExpExecArray | null} */ let match; // Only look for `atBreak`s. // Btw: note that `atBreak` patterns will always start the regex at LF or // CR. if (!pattern.atBreak) continue while ((match = expression.exec(value))) { let position = match.index; // Support CRLF (patterns only look for one of the characters). if ( value.charCodeAt(position) === 10 /* `\n` */ && value.charCodeAt(position - 1) === 13 /* `\r` */ ) { position--; } value = value.slice(0, position) + ' ' + value.slice(match.index + 1); } } return sequence + value + sequence } /** * @returns {string} */ function inlineCodePeek() { return '`' } /** * @typedef {import('mdast').Link} Link * @typedef {import('../types.js').State} State */ /** * @param {Link} node * @param {State} state * @returns {boolean} */ function formatLinkAsAutolink(node, state) { const raw = toString$1(node); return Boolean( !state.options.resourceLink && // If there’s a url… node.url && // And there’s a no title… !node.title && // And the content of `node` is a single text node… node.children && node.children.length === 1 && node.children[0].type === 'text' && // And if the url is the same as the content… (raw === node.url || 'mailto:' + raw === node.url) && // And that starts w/ a protocol… /^[a-z][a-z+.-]+:/i.test(node.url) && // And that doesn’t contain ASCII control codes (character escapes and // references don’t work), space, or angle brackets… !/[\0- <>\u007F]/.test(node.url) ) } /** * @typedef {import('mdast').Link} Link * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Exit} Exit * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ link.peek = linkPeek; /** * @param {Link} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function link(node, _, state, info) { const quote = checkQuote(state); const suffix = quote === '"' ? 
'Quote' : 'Apostrophe'; const tracker = state.createTracker(info); /** @type {Exit} */ let exit; /** @type {Exit} */ let subexit; if (formatLinkAsAutolink(node, state)) { // Hide the fact that we’re in phrasing, because escapes don’t work. const stack = state.stack; state.stack = []; exit = state.enter('autolink'); let value = tracker.move('<'); value += tracker.move( state.containerPhrasing(node, { before: value, after: '>', ...tracker.current() }) ); value += tracker.move('>'); exit(); state.stack = stack; return value } exit = state.enter('link'); subexit = state.enter('label'); let value = tracker.move('['); value += tracker.move( state.containerPhrasing(node, { before: value, after: '](', ...tracker.current() }) ); value += tracker.move(']('); subexit(); if ( // If there’s no url but there is a title… (!node.url && node.title) || // If there are control characters or whitespace. /[\0- \u007F]/.test(node.url) ) { subexit = state.enter('destinationLiteral'); value += tracker.move('<'); value += tracker.move( state.safe(node.url, {before: value, after: '>', ...tracker.current()}) ); value += tracker.move('>'); } else { // No whitespace, raw is prettier. subexit = state.enter('destinationRaw'); value += tracker.move( state.safe(node.url, { before: value, after: node.title ? ' ' : ')', ...tracker.current() }) ); } subexit(); if (node.title) { subexit = state.enter(`title${suffix}`); value += tracker.move(' ' + quote); value += tracker.move( state.safe(node.title, { before: value, after: quote, ...tracker.current() }) ); value += tracker.move(quote); subexit(); } value += tracker.move(')'); exit(); return value } /** * @param {Link} node * @param {Parents | undefined} _ * @param {State} state * @returns {string} */ function linkPeek(node, _, state) { return formatLinkAsAutolink(node, state) ? '<' : '[' } /** * @typedef {import('mdast').LinkReference} LinkReference * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ linkReference.peek = linkReferencePeek; /** * @param {LinkReference} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function linkReference(node, _, state, info) { const type = node.referenceType; const exit = state.enter('linkReference'); let subexit = state.enter('label'); const tracker = state.createTracker(info); let value = tracker.move('['); const text = state.containerPhrasing(node, { before: value, after: ']', ...tracker.current() }); value += tracker.move(text + ']['); subexit(); // Hide the fact that we’re in phrasing, because escapes don’t work. const stack = state.stack; state.stack = []; subexit = state.enter('reference'); // Note: for proper tracking, we should reset the output positions when we end // up making a `shortcut` reference, because then there is no brace output. // Practically, in that case, there is no content, so it doesn’t matter that // we’ve tracked one too many characters. const reference = state.safe(state.associationId(node), { before: value, after: ']', ...tracker.current() }); subexit(); state.stack = stack; exit(); if (type === 'full' || !text || text !== reference) { value += tracker.move(reference + ']'); } else if (type === 'shortcut') { // Remove the unwanted `[`. 
value = value.slice(0, -1); } else { value += tracker.move(']'); } return value } /** * @returns {string} */ function linkReferencePeek() { return '[' } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkBullet(state) { const marker = state.options.bullet || '*'; if (marker !== '*' && marker !== '+' && marker !== '-') { throw new Error( 'Cannot serialize items with `' + marker + '` for `options.bullet`, expected `*`, `+`, or `-`' ) } return marker } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkBulletOther(state) { const bullet = checkBullet(state); const bulletOther = state.options.bulletOther; if (!bulletOther) { return bullet === '*' ? '-' : '*' } if (bulletOther !== '*' && bulletOther !== '+' && bulletOther !== '-') { throw new Error( 'Cannot serialize items with `' + bulletOther + '` for `options.bulletOther`, expected `*`, `+`, or `-`' ) } if (bulletOther === bullet) { throw new Error( 'Expected `bullet` (`' + bullet + '`) and `bulletOther` (`' + bulletOther + '`) to be different' ) } return bulletOther } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkBulletOrdered(state) { const marker = state.options.bulletOrdered || '.'; if (marker !== '.' && marker !== ')') { throw new Error( 'Cannot serialize items with `' + marker + '` for `options.bulletOrdered`, expected `.` or `)`' ) } return marker } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkRule(state) { const marker = state.options.rule || '*'; if (marker !== '*' && marker !== '-' && marker !== '_') { throw new Error( 'Cannot serialize rules with `' + marker + '` for `options.rule`, expected `*`, `-`, or `_`' ) } return marker } /** * @typedef {import('mdast').List} List * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {List} node * @param {Parents | undefined} parent * @param {State} state * @param {Info} info * @returns {string} */ function list$1(node, parent, state, info) { const exit = state.enter('list'); const bulletCurrent = state.bulletCurrent; /** @type {string} */ let bullet = node.ordered ? checkBulletOrdered(state) : checkBullet(state); /** @type {string} */ const bulletOther = node.ordered ? bullet === '.' ? ')' : '.' : checkBulletOther(state); let useDifferentMarker = parent && state.bulletLastUsed ? bullet === state.bulletLastUsed : false; if (!node.ordered) { const firstListItem = node.children ? node.children[0] : undefined; // If there’s an empty first list item directly in two list items, // we have to use a different bullet: // // ```markdown // * - * // ``` // // …because otherwise it would become one big thematic break. 
if ( // Bullet could be used as a thematic break marker: (bullet === '*' || bullet === '-') && // Empty first list item: firstListItem && (!firstListItem.children || !firstListItem.children[0]) && // Directly in two other list items: state.stack[state.stack.length - 1] === 'list' && state.stack[state.stack.length - 2] === 'listItem' && state.stack[state.stack.length - 3] === 'list' && state.stack[state.stack.length - 4] === 'listItem' && // That are each the first child. state.indexStack[state.indexStack.length - 1] === 0 && state.indexStack[state.indexStack.length - 2] === 0 && state.indexStack[state.indexStack.length - 3] === 0 ) { useDifferentMarker = true; } // If there’s a thematic break at the start of the first list item, // we have to use a different bullet: // // ```markdown // * --- // ``` // // …because otherwise it would become one big thematic break. if (checkRule(state) === bullet && firstListItem) { let index = -1; while (++index < node.children.length) { const item = node.children[index]; if ( item && item.type === 'listItem' && item.children && item.children[0] && item.children[0].type === 'thematicBreak' ) { useDifferentMarker = true; break } } } } if (useDifferentMarker) { bullet = bulletOther; } state.bulletCurrent = bullet; const value = state.containerFlow(node, info); state.bulletLastUsed = bullet; state.bulletCurrent = bulletCurrent; exit(); return value } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkListItemIndent(state) { const style = state.options.listItemIndent || 'one'; if (style !== 'tab' && style !== 'one' && style !== 'mixed') { throw new Error( 'Cannot serialize items with `' + style + '` for `options.listItemIndent`, expected `tab`, `one`, or `mixed`' ) } return style } /** * @typedef {import('mdast').ListItem} ListItem * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').Map} Map * @typedef {import('../types.js').State} State */ /** * @param {ListItem} node * @param {Parents | undefined} parent * @param {State} state * @param {Info} info * @returns {string} */ function listItem(node, parent, state, info) { const listItemIndent = checkListItemIndent(state); let bullet = state.bulletCurrent || checkBullet(state); // Add the marker value for ordered lists. if (parent && parent.type === 'list' && parent.ordered) { bullet = (typeof parent.start === 'number' && parent.start > -1 ? parent.start : 1) + (state.options.incrementListMarker === false ? 0 : parent.children.indexOf(node)) + bullet; } let size = bullet.length + 1; if ( listItemIndent === 'tab' || (listItemIndent === 'mixed' && ((parent && parent.type === 'list' && parent.spread) || node.spread)) ) { size = Math.ceil(size / 4) * 4; } const tracker = state.createTracker(info); tracker.move(bullet + ' '.repeat(size - bullet.length)); tracker.shift(size); const exit = state.enter('listItem'); const value = state.indentLines( state.containerFlow(node, tracker.current()), map ); exit(); return value /** @type {Map} */ function map(line, index, blank) { if (index) { return (blank ? '' : ' '.repeat(size)) + line } return (blank ? 
bullet : bullet + ' '.repeat(size - bullet.length)) + line } } /** * @typedef {import('mdast').Paragraph} Paragraph * @typedef {import('mdast').Parents} Parents * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {Paragraph} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function paragraph(node, _, state, info) { const exit = state.enter('paragraph'); const subexit = state.enter('phrasing'); const value = state.containerPhrasing(node, info); subexit(); exit(); return value } /** * @typedef {import('mdast').Html} Html * @typedef {import('mdast').PhrasingContent} PhrasingContent */ /** * Check if the given value is *phrasing content*. * * > 👉 **Note**: Excludes `html`, which can be both phrasing or flow. * * @param node * Thing to check, typically `Node`. * @returns * Whether `value` is phrasing content. */ const phrasing = /** @type {(node?: unknown) => node is Exclude} */ ( convert([ 'break', 'delete', 'emphasis', // To do: next major: removed since footnotes were added to GFM. 'footnote', 'footnoteReference', 'image', 'imageReference', 'inlineCode', // Enabled by `mdast-util-math`: 'inlineMath', 'link', 'linkReference', // Enabled by `mdast-util-mdx`: 'mdxJsxTextElement', // Enabled by `mdast-util-mdx`: 'mdxTextExpression', 'strong', 'text', // Enabled by `mdast-util-directive`: 'textDirective' ]) ); /** * @typedef {import('mdast').Parents} Parents * @typedef {import('mdast').Root} Root * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {Root} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function root$1(node, _, state, info) { // Note: `html` nodes are ambiguous. const hasPhrasing = node.children.some(function (d) { return phrasing(d) }); const fn = hasPhrasing ? state.containerPhrasing : state.containerFlow; return fn.call(state, node, info) } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkStrong(state) { const marker = state.options.strong || '*'; if (marker !== '*' && marker !== '_') { throw new Error( 'Cannot serialize strong with `' + marker + '` for `options.strong`, expected `*`, or `_`' ) } return marker } /** * @typedef {import('mdast').Parents} Parents * @typedef {import('mdast').Strong} Strong * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ strong.peek = strongPeek; // To do: there are cases where emphasis cannot “form” depending on the // previous or next character of sequences. // There’s no way around that though, except for injecting zero-width stuff. // Do we need to safeguard against that? 
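// Illustrative sketch (an assumption, not an excerpt of this bundle): the
// `phrasing` predicate above is a `unist-util-is` style check built with
// `convert`, so for example:
//
//   phrasing({type: 'text', value: 'x'});          // should be true
//   phrasing({type: 'paragraph', children: []});   // should be false
//
// `root$1` uses it to pick `containerPhrasing` (children joined flush) when a
// root contains phrasing children, and `containerFlow` (children joined by
// blank lines) otherwise.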
/** * @param {Strong} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function strong(node, _, state, info) { const marker = checkStrong(state); const exit = state.enter('strong'); const tracker = state.createTracker(info); let value = tracker.move(marker + marker); value += tracker.move( state.containerPhrasing(node, { before: value, after: marker, ...tracker.current() }) ); value += tracker.move(marker + marker); exit(); return value } /** * @param {Strong} _ * @param {Parents | undefined} _1 * @param {State} state * @returns {string} */ function strongPeek(_, _1, state) { return state.options.strong || '*' } /** * @typedef {import('mdast').Parents} Parents * @typedef {import('mdast').Text} Text * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').State} State */ /** * @param {Text} node * @param {Parents | undefined} _ * @param {State} state * @param {Info} info * @returns {string} */ function text(node, _, state, info) { return state.safe(node.value, info) } /** * @typedef {import('../types.js').Options} Options * @typedef {import('../types.js').State} State */ /** * @param {State} state * @returns {Exclude} */ function checkRuleRepetition(state) { const repetition = state.options.ruleRepetition || 3; if (repetition < 3) { throw new Error( 'Cannot serialize rules with repetition `' + repetition + '` for `options.ruleRepetition`, expected `3` or more' ) } return repetition } /** * @typedef {import('mdast').Parents} Parents * @typedef {import('mdast').ThematicBreak} ThematicBreak * @typedef {import('../types.js').State} State */ /** * @param {ThematicBreak} _ * @param {Parents | undefined} _1 * @param {State} state * @returns {string} */ function thematicBreak(_, _1, state) { const value = ( checkRule(state) + (state.options.ruleSpaces ? ' ' : '') ).repeat(checkRuleRepetition(state)); return state.options.ruleSpaces ? value.slice(0, -1) : value } /** * Default (CommonMark) handlers. */ const handle = { blockquote, break: hardBreak, code, definition, emphasis, hardBreak, heading, html, image, imageReference, inlineCode, link, linkReference, list: list$1, listItem, paragraph, root: root$1, strong, text, thematicBreak }; /** * @typedef {import('./types.js').Join} Join */ /** @type {Array} */ const join = [joinDefaults]; /** @type {Join} */ function joinDefaults(left, right, parent, state) { // Indented code after list or another indented code. if ( right.type === 'code' && formatCodeAsIndented(right, state) && (left.type === 'list' || (left.type === right.type && formatCodeAsIndented(left, state))) ) { return false } // Join children of a list or an item. // In which case, `parent` has a `spread` field. if ('spread' in parent && typeof parent.spread === 'boolean') { if ( left.type === 'paragraph' && // Two paragraphs. (left.type === right.type || right.type === 'definition' || // Paragraph followed by a setext heading. (right.type === 'heading' && formatHeadingAsSetext(right, state))) ) { return } return parent.spread ? 1 : 0 } } /** * @typedef {import('./types.js').ConstructName} ConstructName * @typedef {import('./types.js').Unsafe} Unsafe */ /** * List of constructs that occur in phrasing (paragraphs, headings), but cannot * contain things like attention (emphasis, strong), images, or links. * So they sort of cancel each other out. * Note: could use a better name. 
* * @type {Array} */ const fullPhrasingSpans = [ 'autolink', 'destinationLiteral', 'destinationRaw', 'reference', 'titleQuote', 'titleApostrophe' ]; /** @type {Array} */ const unsafe = [ {character: '\t', after: '[\\r\\n]', inConstruct: 'phrasing'}, {character: '\t', before: '[\\r\\n]', inConstruct: 'phrasing'}, { character: '\t', inConstruct: ['codeFencedLangGraveAccent', 'codeFencedLangTilde'] }, { character: '\r', inConstruct: [ 'codeFencedLangGraveAccent', 'codeFencedLangTilde', 'codeFencedMetaGraveAccent', 'codeFencedMetaTilde', 'destinationLiteral', 'headingAtx' ] }, { character: '\n', inConstruct: [ 'codeFencedLangGraveAccent', 'codeFencedLangTilde', 'codeFencedMetaGraveAccent', 'codeFencedMetaTilde', 'destinationLiteral', 'headingAtx' ] }, {character: ' ', after: '[\\r\\n]', inConstruct: 'phrasing'}, {character: ' ', before: '[\\r\\n]', inConstruct: 'phrasing'}, { character: ' ', inConstruct: ['codeFencedLangGraveAccent', 'codeFencedLangTilde'] }, // An exclamation mark can start an image, if it is followed by a link or // a link reference. { character: '!', after: '\\[', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans }, // A quote can break out of a title. {character: '"', inConstruct: 'titleQuote'}, // A number sign could start an ATX heading if it starts a line. {atBreak: true, character: '#'}, {character: '#', inConstruct: 'headingAtx', after: '(?:[\r\n]|$)'}, // Dollar sign and percentage are not used in markdown. // An ampersand could start a character reference. {character: '&', after: '[#A-Za-z]', inConstruct: 'phrasing'}, // An apostrophe can break out of a title. {character: "'", inConstruct: 'titleApostrophe'}, // A left paren could break out of a destination raw. {character: '(', inConstruct: 'destinationRaw'}, // A left paren followed by `]` could make something into a link or image. { before: '\\]', character: '(', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans }, // A right paren could start a list item or break out of a destination // raw. {atBreak: true, before: '\\d+', character: ')'}, {character: ')', inConstruct: 'destinationRaw'}, // An asterisk can start thematic breaks, list items, emphasis, strong. {atBreak: true, character: '*', after: '(?:[ \t\r\n*])'}, {character: '*', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, // A plus sign could start a list item. {atBreak: true, character: '+', after: '(?:[ \t\r\n])'}, // A dash can start thematic breaks, list items, and setext heading // underlines. {atBreak: true, character: '-', after: '(?:[ \t\r\n-])'}, // A dot could start a list item. {atBreak: true, before: '\\d+', character: '.', after: '(?:[ \t\r\n]|$)'}, // Slash, colon, and semicolon are not used in markdown for constructs. // A less than can start html (flow or text) or an autolink. // HTML could start with an exclamation mark (declaration, cdata, comment), // slash (closing tag), question mark (instruction), or a letter (tag). // An autolink also starts with a letter. // Finally, it could break out of a destination literal. {atBreak: true, character: '<', after: '[!/?A-Za-z]'}, { character: '<', after: '[!/?A-Za-z]', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans }, {character: '<', inConstruct: 'destinationLiteral'}, // An equals to can start setext heading underlines. {atBreak: true, character: '='}, // A greater than can start block quotes and it can break out of a // destination literal. 
{atBreak: true, character: '>'}, {character: '>', inConstruct: 'destinationLiteral'}, // Question mark and at sign are not used in markdown for constructs. // A left bracket can start definitions, references, labels, {atBreak: true, character: '['}, {character: '[', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, {character: '[', inConstruct: ['label', 'reference']}, // A backslash can start an escape (when followed by punctuation) or a // hard break (when followed by an eol). // Note: typical escapes are handled in `safe`! {character: '\\', after: '[\\r\\n]', inConstruct: 'phrasing'}, // A right bracket can exit labels. {character: ']', inConstruct: ['label', 'reference']}, // Caret is not used in markdown for constructs. // An underscore can start emphasis, strong, or a thematic break. {atBreak: true, character: '_'}, {character: '_', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, // A grave accent can start code (fenced or text), or it can break out of // a grave accent code fence. {atBreak: true, character: '`'}, { character: '`', inConstruct: ['codeFencedLangGraveAccent', 'codeFencedMetaGraveAccent'] }, {character: '`', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, // Left brace, vertical bar, right brace are not used in markdown for // constructs. // A tilde can start code (fenced). {atBreak: true, character: '~'} ]; /** * @typedef {import('../types.js').AssociationId} AssociationId */ /** * Get an identifier from an association to match it to others. * * Associations are nodes that match to something else through an ID: * . * * The `label` of an association is the string value: character escapes and * references work, and casing is intact. * The `identifier` is used to match one association to another: * controversially, character escapes and references don’t work in this * matching: `©` does not match `©`, and `\+` does not match `+`. * * But casing is ignored (and whitespace) is trimmed and collapsed: ` A\nb` * matches `a b`. * So, we do prefer the label when figuring out how we’re going to serialize: * it has whitespace, casing, and we can ignore most useless character * escapes and all character references. * * @type {AssociationId} */ function association(node) { if (node.label || !node.identifier) { return node.label || '' } return decodeString(node.identifier) } /** * @typedef {import('../types.js').CompilePattern} CompilePattern */ /** * @type {CompilePattern} */ function compilePattern(pattern) { if (!pattern._compiled) { const before = (pattern.atBreak ? '[\\r\\n][\\t ]*' : '') + (pattern.before ? '(?:' + pattern.before + ')' : ''); pattern._compiled = new RegExp( (before ? '(' + before + ')' : '') + (/[|\\{}()[\]^$+*?.-]/.test(pattern.character) ? '\\' : '') + pattern.character + (pattern.after ? '(?:' + pattern.after + ')' : ''), 'g' ); } return pattern._compiled } /** * @typedef {import('../types.js').Handle} Handle * @typedef {import('../types.js').Info} Info * @typedef {import('../types.js').PhrasingParents} PhrasingParents * @typedef {import('../types.js').State} State */ /** * Serialize the children of a parent that contains phrasing children. * * These children will be joined flush together. * * @param {PhrasingParents} parent * Parent of flow nodes. * @param {State} state * Info passed around about the current state. * @param {Info} info * Info on where we are in the document we are generating. * @returns {string} * Serialized children, joined together. 
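 *
 * Illustrative call (an assumed shape that matches the parameters documented
 * above, not an excerpt): for a paragraph whose children are a text node and
 * an emphasis node, the serialized children are concatenated without any
 * separator, for example:
 *
 *   containerPhrasing(paragraph, state, {before: '\n', after: '\n', now: {line: 1, column: 1}, lineShift: 0})
 *   // could yield something like 'alpha *bravo*'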
*/ function containerPhrasing(parent, state, info) { const indexStack = state.indexStack; const children = parent.children || []; /** @type {Array} */ const results = []; let index = -1; let before = info.before; indexStack.push(-1); let tracker = state.createTracker(info); while (++index < children.length) { const child = children[index]; /** @type {string} */ let after; indexStack[indexStack.length - 1] = index; if (index + 1 < children.length) { /** @type {Handle} */ // @ts-expect-error: hush, it’s actually a `zwitch`. let handle = state.handle.handlers[children[index + 1].type]; /** @type {Handle} */ // @ts-expect-error: hush, it’s actually a `zwitch`. if (handle && handle.peek) handle = handle.peek; after = handle ? handle(children[index + 1], parent, state, { before: '', after: '', ...tracker.current() }).charAt(0) : ''; } else { after = info.after; } // In some cases, html (text) can be found in phrasing right after an eol. // When we’d serialize that, in most cases that would be seen as html // (flow). // As we can’t escape or so to prevent it from happening, we take a somewhat // reasonable approach: replace that eol with a space. // See: if ( results.length > 0 && (before === '\r' || before === '\n') && child.type === 'html' ) { results[results.length - 1] = results[results.length - 1].replace( /(\r?\n|\r)$/, ' ' ); before = ' '; // To do: does this work to reset tracker? tracker = state.createTracker(info); tracker.move(results.join('')); } results.push( tracker.move( state.handle(child, parent, state, { ...tracker.current(), before, after }) ) ); before = results[results.length - 1].slice(-1); } indexStack.pop(); return results.join('') } /** * @typedef {import('../types.js').FlowParents} FlowParents * @typedef {import('../types.js').FlowChildren} FlowChildren * @typedef {import('../types.js').State} State * @typedef {import('../types.js').TrackFields} TrackFields */ /** * @param {FlowParents} parent * Parent of flow nodes. * @param {State} state * Info passed around about the current state. * @param {TrackFields} info * Info on where we are in the document we are generating. * @returns {string} * Serialized children, joined by (blank) lines. 
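 *
 * Illustrative call (an assumed shape that matches the parameters documented
 * above, not an excerpt): for a root with two paragraphs the results are
 * joined with one blank line (`'\n\n'`) by default; `Join` functions in
 * `state.join` (such as `joinDefaults` earlier in this bundle) can ask for
 * more, fewer, or zero blank lines between two specific siblings:
 *
 *   containerFlow(root, state, {now: {line: 1, column: 1}, lineShift: 0})
 *   // could yield something like 'alpha\n\nbravo'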
*/ function containerFlow(parent, state, info) { const indexStack = state.indexStack; const children = parent.children || []; const tracker = state.createTracker(info); /** @type {Array} */ const results = []; let index = -1; indexStack.push(-1); while (++index < children.length) { const child = children[index]; indexStack[indexStack.length - 1] = index; results.push( tracker.move( state.handle(child, parent, state, { before: '\n', after: '\n', ...tracker.current() }) ) ); if (child.type !== 'list') { state.bulletLastUsed = undefined; } if (index < children.length - 1) { results.push( tracker.move(between(child, children[index + 1], parent, state)) ); } } indexStack.pop(); return results.join('') } /** * @param {FlowChildren} left * @param {FlowChildren} right * @param {FlowParents} parent * @param {State} state * @returns {string} */ function between(left, right, parent, state) { let index = state.join.length; while (index--) { const result = state.join[index](left, right, parent, state); if (result === true || result === 1) { break } if (typeof result === 'number') { return '\n'.repeat(1 + result) } if (result === false) { return '\n\n\n\n' } } return '\n\n' } /** * @typedef {import('../types.js').IndentLines} IndentLines */ const eol = /\r?\n|\r/g; /** * @type {IndentLines} */ function indentLines(value, map) { /** @type {Array} */ const result = []; let start = 0; let line = 0; /** @type {RegExpExecArray | null} */ let match; while ((match = eol.exec(value))) { one(value.slice(start, match.index)); result.push(match[0]); start = match.index + match[0].length; line++; } one(value.slice(start)); return result.join('') /** * @param {string} value */ function one(value) { result.push(map(value, line, !value)); } } /** * @typedef {import('../types.js').SafeConfig} SafeConfig * @typedef {import('../types.js').State} State */ /** * Make a string safe for embedding in markdown constructs. * * In markdown, almost all punctuation characters can, in certain cases, * result in something. * Whether they do is highly subjective to where they happen and in what * they happen. * * To solve this, `mdast-util-to-markdown` tracks: * * * Characters before and after something; * * What “constructs” we are in. * * This information is then used by this function to escape or encode * special characters. * * @param {State} state * Info passed around about the current state. * @param {string | null | undefined} input * Raw value to make safe. * @param {SafeConfig} config * Configuration. * @returns {string} * Serialized markdown safe for embedding. */ function safe(state, input, config) { const value = (config.before || '') + (input || '') + (config.after || ''); /** @type {Array} */ const positions = []; /** @type {Array} */ const result = []; /** @type {Record} */ const infos = {}; let index = -1; while (++index < state.unsafe.length) { const pattern = state.unsafe[index]; if (!patternInScope(state.stack, pattern)) { continue } const expression = state.compilePattern(pattern); /** @type {RegExpExecArray | null} */ let match; while ((match = expression.exec(value))) { const before = 'before' in pattern || Boolean(pattern.atBreak); const after = 'after' in pattern; const position = match.index + (before ? 
match[1].length : 0); if (positions.includes(position)) { if (infos[position].before && !before) { infos[position].before = false; } if (infos[position].after && !after) { infos[position].after = false; } } else { positions.push(position); infos[position] = {before, after}; } } } positions.sort(numerical); let start = config.before ? config.before.length : 0; const end = value.length - (config.after ? config.after.length : 0); index = -1; while (++index < positions.length) { const position = positions[index]; // Character before or after matched: if (position < start || position >= end) { continue } // If this character is supposed to be escaped because it has a condition on // the next character, and the next character is definitly being escaped, // then skip this escape. if ( (position + 1 < end && positions[index + 1] === position + 1 && infos[position].after && !infos[position + 1].before && !infos[position + 1].after) || (positions[index - 1] === position - 1 && infos[position].before && !infos[position - 1].before && !infos[position - 1].after) ) { continue } if (start !== position) { // If we have to use a character reference, an ampersand would be more // correct, but as backslashes only care about punctuation, either will // do the trick result.push(escapeBackslashes(value.slice(start, position), '\\')); } start = position; if ( /[!-/:-@[-`{-~]/.test(value.charAt(position)) && (!config.encode || !config.encode.includes(value.charAt(position))) ) { // Character escape. result.push('\\'); } else { // Character reference. result.push( '&#x' + value.charCodeAt(position).toString(16).toUpperCase() + ';' ); start++; } } result.push(escapeBackslashes(value.slice(start, end), config.after)); return result.join('') } /** * @param {number} a * @param {number} b * @returns {number} */ function numerical(a, b) { return a - b } /** * @param {string} value * @param {string} after * @returns {string} */ function escapeBackslashes(value, after) { const expression = /\\(?=[!-/:-@[-`{-~])/g; /** @type {Array} */ const positions = []; /** @type {Array} */ const results = []; const whole = value + after; let index = -1; let start = 0; /** @type {RegExpExecArray | null} */ let match; while ((match = expression.exec(whole))) { positions.push(match.index); } while (++index < positions.length) { if (start !== positions[index]) { results.push(value.slice(start, positions[index])); } results.push('\\'); start = positions[index]; } results.push(value.slice(start)); return results.join('') } /** * @typedef {import('../types.js').CreateTracker} CreateTracker * @typedef {import('../types.js').TrackCurrent} TrackCurrent * @typedef {import('../types.js').TrackMove} TrackMove * @typedef {import('../types.js').TrackShift} TrackShift */ /** * Track positional info in the output. * * @type {CreateTracker} */ function track(config) { // Defaults are used to prevent crashes when older utilities somehow activate // this code. /* c8 ignore next 5 */ const options = config || {}; const now = options.now || {}; let lineShift = options.lineShift || 0; let line = now.line || 1; let column = now.column || 1; return {move, current, shift} /** * Get the current tracked info. * * @type {TrackCurrent} */ function current() { return {now: {line, column}, lineShift} } /** * Define an increased line shift (the typical indent for lines). * * @type {TrackShift} */ function shift(value) { lineShift += value; } /** * Move past some generated markdown. 
* * @type {TrackMove} */ function move(input) { // eslint-disable-next-line unicorn/prefer-default-parameters const value = input || ''; const chunks = value.split(/\r?\n|\r/g); const tail = chunks[chunks.length - 1]; line += chunks.length - 1; column = chunks.length === 1 ? column + tail.length : 1 + tail.length + lineShift; return value } } /** * @typedef {import('mdast').Nodes} Nodes * @typedef {import('./types.js').Enter} Enter * @typedef {import('./types.js').Info} Info * @typedef {import('./types.js').Join} Join * @typedef {import('./types.js').FlowParents} FlowParents * @typedef {import('./types.js').Options} Options * @typedef {import('./types.js').PhrasingParents} PhrasingParents * @typedef {import('./types.js').SafeConfig} SafeConfig * @typedef {import('./types.js').State} State * @typedef {import('./types.js').TrackFields} TrackFields */ /** * Turn an mdast syntax tree into markdown. * * @param {Nodes} tree * Tree to serialize. * @param {Options} [options] * Configuration (optional). * @returns {string} * Serialized markdown representing `tree`. */ function toMarkdown(tree, options = {}) { /** @type {State} */ const state = { enter, indentLines, associationId: association, containerPhrasing: containerPhrasingBound, containerFlow: containerFlowBound, createTracker: track, compilePattern, safe: safeBound, stack: [], unsafe: [...unsafe], join: [...join], // @ts-expect-error: GFM / frontmatter are typed in `mdast` but not defined // here. handlers: {...handle}, options: {}, indexStack: [], // @ts-expect-error: add `handle` in a second. handle: undefined }; configure(state, options); if (state.options.tightDefinitions) { state.join.push(joinDefinition); } state.handle = zwitch('type', { invalid, unknown, handlers: state.handlers }); let result = state.handle(tree, undefined, state, { before: '\n', after: '\n', now: {line: 1, column: 1}, lineShift: 0 }); if ( result && result.charCodeAt(result.length - 1) !== 10 && result.charCodeAt(result.length - 1) !== 13 ) { result += '\n'; } return result /** @type {Enter} */ function enter(name) { state.stack.push(name); return exit /** * @returns {undefined} */ function exit() { state.stack.pop(); } } } /** * @param {unknown} value * @returns {never} */ function invalid(value) { throw new Error('Cannot handle value `' + value + '`, expected node') } /** * @param {unknown} value * @returns {never} */ function unknown(value) { // Always a node. const node = /** @type {Nodes} */ (value); throw new Error('Cannot handle unknown node `' + node.type + '`') } /** @type {Join} */ function joinDefinition(left, right) { // No blank line between adjacent definitions. if (left.type === 'definition' && left.type === right.type) { return 0 } } /** * Serialize the children of a parent that contains phrasing children. * * These children will be joined flush together. * * @this {State} * Info passed around about the current state. * @param {PhrasingParents} parent * Parent of flow nodes. * @param {Info} info * Info on where we are in the document we are generating. * @returns {string} * Serialized children, joined together. */ function containerPhrasingBound(parent, info) { return containerPhrasing(parent, this, info) } /** * Serialize the children of a parent that contains flow children. * * These children will typically be joined by blank lines. * What they are joined by exactly is defined by `Join` functions. * * @this {State} * Info passed around about the current state. * @param {FlowParents} parent * Parent of flow nodes. 
* @param {TrackFields} info * Info on where we are in the document we are generating. * @returns {string} * Serialized children, joined by (blank) lines. */ function containerFlowBound(parent, info) { return containerFlow(parent, this, info) } /** * Make a string safe for embedding in markdown constructs. * * In markdown, almost all punctuation characters can, in certain cases, * result in something. * Whether they do is highly subjective to where they happen and in what * they happen. * * To solve this, `mdast-util-to-markdown` tracks: * * * Characters before and after something; * * What “constructs” we are in. * * This information is then used by this function to escape or encode * special characters. * * @this {State} * Info passed around about the current state. * @param {string | null | undefined} value * Raw value to make safe. * @param {SafeConfig} config * Configuration. * @returns {string} * Serialized markdown safe for embedding. */ function safeBound(value, config) { return safe(this, value, config) } /** * @typedef {import('mdast').Root} Root * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownOptions * @typedef {import('unified').Compiler} Compiler * @typedef {import('unified').Processor} Processor */ /** * Add support for serializing to markdown. * * @param {Readonly | null | undefined} [options] * Configuration (optional). * @returns {undefined} * Nothing. */ function remarkStringify(options) { /** @type {Processor} */ // @ts-expect-error: TS in JSDoc generates wrong types if `this` is typed regularly. const self = this; self.compiler = compiler; /** * @type {Compiler} */ function compiler(tree) { return toMarkdown(tree, { ...self.data('settings'), ...options, // Note: this option is not in the readme. // The goal is for it to be set by plugins on `data` instead of being // passed by users. 
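// Illustrative sketch (added comment, not part of the original bundle): a
// syntax plugin would typically register its serializer on `data` before this
// compiler runs, roughly like
//   const data = this.data();
//   (data.toMarkdownExtensions = data.toMarkdownExtensions || []).push(someToMarkdownExtension);
// so the lookup on the next line picks those extensions up.
// `someToMarkdownExtension` is a placeholder name, not an export of this bundle.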
extensions: self.data('toMarkdownExtensions') || [] }) } } const ANSI_BACKGROUND_OFFSET = 10; const wrapAnsi16 = (offset = 0) => code => `\u001B[${code + offset}m`; const wrapAnsi256 = (offset = 0) => code => `\u001B[${38 + offset};5;${code}m`; const wrapAnsi16m = (offset = 0) => (red, green, blue) => `\u001B[${38 + offset};2;${red};${green};${blue}m`; const styles$1 = { modifier: { reset: [0, 0], // 21 isn't widely supported and 22 does the same thing bold: [1, 22], dim: [2, 22], italic: [3, 23], underline: [4, 24], overline: [53, 55], inverse: [7, 27], hidden: [8, 28], strikethrough: [9, 29], }, color: { black: [30, 39], red: [31, 39], green: [32, 39], yellow: [33, 39], blue: [34, 39], magenta: [35, 39], cyan: [36, 39], white: [37, 39], // Bright color blackBright: [90, 39], gray: [90, 39], // Alias of `blackBright` grey: [90, 39], // Alias of `blackBright` redBright: [91, 39], greenBright: [92, 39], yellowBright: [93, 39], blueBright: [94, 39], magentaBright: [95, 39], cyanBright: [96, 39], whiteBright: [97, 39], }, bgColor: { bgBlack: [40, 49], bgRed: [41, 49], bgGreen: [42, 49], bgYellow: [43, 49], bgBlue: [44, 49], bgMagenta: [45, 49], bgCyan: [46, 49], bgWhite: [47, 49], // Bright color bgBlackBright: [100, 49], bgGray: [100, 49], // Alias of `bgBlackBright` bgGrey: [100, 49], // Alias of `bgBlackBright` bgRedBright: [101, 49], bgGreenBright: [102, 49], bgYellowBright: [103, 49], bgBlueBright: [104, 49], bgMagentaBright: [105, 49], bgCyanBright: [106, 49], bgWhiteBright: [107, 49], }, }; Object.keys(styles$1.modifier); const foregroundColorNames = Object.keys(styles$1.color); const backgroundColorNames = Object.keys(styles$1.bgColor); [...foregroundColorNames, ...backgroundColorNames]; function assembleStyles() { const codes = new Map(); for (const [groupName, group] of Object.entries(styles$1)) { for (const [styleName, style] of Object.entries(group)) { styles$1[styleName] = { open: `\u001B[${style[0]}m`, close: `\u001B[${style[1]}m`, }; group[styleName] = styles$1[styleName]; codes.set(style[0], style[1]); } Object.defineProperty(styles$1, groupName, { value: group, enumerable: false, }); } Object.defineProperty(styles$1, 'codes', { value: codes, enumerable: false, }); styles$1.color.close = '\u001B[39m'; styles$1.bgColor.close = '\u001B[49m'; styles$1.color.ansi = wrapAnsi16(); styles$1.color.ansi256 = wrapAnsi256(); styles$1.color.ansi16m = wrapAnsi16m(); styles$1.bgColor.ansi = wrapAnsi16(ANSI_BACKGROUND_OFFSET); styles$1.bgColor.ansi256 = wrapAnsi256(ANSI_BACKGROUND_OFFSET); styles$1.bgColor.ansi16m = wrapAnsi16m(ANSI_BACKGROUND_OFFSET); // From https://github.com/Qix-/color-convert/blob/3f0e0d4e92e235796ccb17f6e85c72094a651f49/conversions.js Object.defineProperties(styles$1, { rgbToAnsi256: { value(red, green, blue) { // We use the extended greyscale palette here, with the exception of // black and white. normal palette only has 4 greyscale shades. 
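// Worked examples (added, for illustration only): rgbToAnsi256(255, 0, 0)
// falls through to the color cube below, 16 + 36*5 + 6*0 + 0 = 196, the
// familiar bright-red cell; rgbToAnsi256(128, 128, 128) takes the greyscale
// branch, Math.round((128 - 8) / 247 * 24) + 232 = 244.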
if (red === green && green === blue) { if (red < 8) { return 16; } if (red > 248) { return 231; } return Math.round(((red - 8) / 247) * 24) + 232; } return 16 + (36 * Math.round(red / 255 * 5)) + (6 * Math.round(green / 255 * 5)) + Math.round(blue / 255 * 5); }, enumerable: false, }, hexToRgb: { value(hex) { const matches = /[a-f\d]{6}|[a-f\d]{3}/i.exec(hex.toString(16)); if (!matches) { return [0, 0, 0]; } let [colorString] = matches; if (colorString.length === 3) { colorString = [...colorString].map(character => character + character).join(''); } const integer = Number.parseInt(colorString, 16); return [ /* eslint-disable no-bitwise */ (integer >> 16) & 0xFF, (integer >> 8) & 0xFF, integer & 0xFF, /* eslint-enable no-bitwise */ ]; }, enumerable: false, }, hexToAnsi256: { value: hex => styles$1.rgbToAnsi256(...styles$1.hexToRgb(hex)), enumerable: false, }, ansi256ToAnsi: { value(code) { if (code < 8) { return 30 + code; } if (code < 16) { return 90 + (code - 8); } let red; let green; let blue; if (code >= 232) { red = (((code - 232) * 10) + 8) / 255; green = red; blue = red; } else { code -= 16; const remainder = code % 36; red = Math.floor(code / 36) / 5; green = Math.floor(remainder / 6) / 5; blue = (remainder % 6) / 5; } const value = Math.max(red, green, blue) * 2; if (value === 0) { return 30; } // eslint-disable-next-line no-bitwise let result = 30 + ((Math.round(blue) << 2) | (Math.round(green) << 1) | Math.round(red)); if (value === 2) { result += 60; } return result; }, enumerable: false, }, rgbToAnsi: { value: (red, green, blue) => styles$1.ansi256ToAnsi(styles$1.rgbToAnsi256(red, green, blue)), enumerable: false, }, hexToAnsi: { value: hex => styles$1.ansi256ToAnsi(styles$1.hexToAnsi256(hex)), enumerable: false, }, }); return styles$1; } const ansiStyles = assembleStyles(); // From: https://github.com/sindresorhus/has-flag/blob/main/index.js /// function hasFlag(flag, argv = globalThis.Deno?.args ?? process.argv) { function hasFlag(flag, argv = globalThis.Deno ? globalThis.Deno.args : process$1.argv) { const prefix = flag.startsWith('-') ? '' : (flag.length === 1 ? '-' : '--'); const position = argv.indexOf(prefix + flag); const terminatorPosition = argv.indexOf('--'); return position !== -1 && (terminatorPosition === -1 || position < terminatorPosition); } const {env} = process$1; let flagForceColor; if ( hasFlag('no-color') || hasFlag('no-colors') || hasFlag('color=false') || hasFlag('color=never') ) { flagForceColor = 0; } else if ( hasFlag('color') || hasFlag('colors') || hasFlag('color=true') || hasFlag('color=always') ) { flagForceColor = 1; } function envForceColor() { if ('FORCE_COLOR' in env) { if (env.FORCE_COLOR === 'true') { return 1; } if (env.FORCE_COLOR === 'false') { return 0; } return env.FORCE_COLOR.length === 0 ? 1 : Math.min(Number.parseInt(env.FORCE_COLOR, 10), 3); } } function translateLevel(level) { if (level === 0) { return false; } return { level, hasBasic: true, has256: level >= 2, has16m: level >= 3, }; } function _supportsColor(haveStream, {streamIsTTY, sniffFlags = true} = {}) { const noFlagForceColor = envForceColor(); if (noFlagForceColor !== undefined) { flagForceColor = noFlagForceColor; } const forceColor = sniffFlags ? flagForceColor : noFlagForceColor; if (forceColor === 0) { return 0; } if (sniffFlags) { if (hasFlag('color=16m') || hasFlag('color=full') || hasFlag('color=truecolor')) { return 3; } if (hasFlag('color=256')) { return 2; } } // Check for Azure DevOps pipelines. // Has to be above the `!streamIsTTY` check. 
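// (Added note, illustrative only: the integers returned from this function are
// the color-support levels consumed by translateLevel above: 0 = no color,
// 1 = basic 16 colors, 2 = 256 colors, 3 = truecolor. For example, running
// with FORCE_COLOR=0 in the environment short-circuits to 0 earlier in this
// function, while a `--color=256` flag yields 2.)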
if ('TF_BUILD' in env && 'AGENT_NAME' in env) { return 1; } if (haveStream && !streamIsTTY && forceColor === undefined) { return 0; } const min = forceColor || 0; if (env.TERM === 'dumb') { return min; } if (process$1.platform === 'win32') { // Windows 10 build 10586 is the first Windows release that supports 256 colors. // Windows 10 build 14931 is the first release that supports 16m/TrueColor. const osRelease = os.release().split('.'); if ( Number(osRelease[0]) >= 10 && Number(osRelease[2]) >= 10_586 ) { return Number(osRelease[2]) >= 14_931 ? 3 : 2; } return 1; } if ('CI' in env) { if ('GITHUB_ACTIONS' in env || 'GITEA_ACTIONS' in env) { return 3; } if (['TRAVIS', 'CIRCLECI', 'APPVEYOR', 'GITLAB_CI', 'BUILDKITE', 'DRONE'].some(sign => sign in env) || env.CI_NAME === 'codeship') { return 1; } return min; } if ('TEAMCITY_VERSION' in env) { return /^(9\.(0*[1-9]\d*)\.|\d{2,}\.)/.test(env.TEAMCITY_VERSION) ? 1 : 0; } if (env.COLORTERM === 'truecolor') { return 3; } if (env.TERM === 'xterm-kitty') { return 3; } if ('TERM_PROGRAM' in env) { const version = Number.parseInt((env.TERM_PROGRAM_VERSION || '').split('.')[0], 10); switch (env.TERM_PROGRAM) { case 'iTerm.app': { return version >= 3 ? 3 : 2; } case 'Apple_Terminal': { return 2; } // No default } } if (/-256(color)?$/i.test(env.TERM)) { return 2; } if (/^screen|^xterm|^vt100|^vt220|^rxvt|color|ansi|cygwin|linux/i.test(env.TERM)) { return 1; } if ('COLORTERM' in env) { return 1; } return min; } function createSupportsColor(stream, options = {}) { const level = _supportsColor(stream, { streamIsTTY: stream && stream.isTTY, ...options, }); return translateLevel(level); } const supportsColor = { stdout: createSupportsColor({isTTY: tty.isatty(1)}), stderr: createSupportsColor({isTTY: tty.isatty(2)}), }; // TODO: When targeting Node.js 16, use `String.prototype.replaceAll`. function stringReplaceAll(string, substring, replacer) { let index = string.indexOf(substring); if (index === -1) { return string; } const substringLength = substring.length; let endIndex = 0; let returnValue = ''; do { returnValue += string.slice(endIndex, index) + substring + replacer; endIndex = index + substringLength; index = string.indexOf(substring, endIndex); } while (index !== -1); returnValue += string.slice(endIndex); return returnValue; } function stringEncaseCRLFWithFirstIndex(string, prefix, postfix, index) { let endIndex = 0; let returnValue = ''; do { const gotCR = string[index - 1] === '\r'; returnValue += string.slice(endIndex, (gotCR ? index - 1 : index)) + prefix + (gotCR ? '\r\n' : '\n') + postfix; endIndex = index + 1; index = string.indexOf('\n', endIndex); } while (index !== -1); returnValue += string.slice(endIndex); return returnValue; } const {stdout: stdoutColor, stderr: stderrColor} = supportsColor; const GENERATOR = Symbol('GENERATOR'); const STYLER = Symbol('STYLER'); const IS_EMPTY = Symbol('IS_EMPTY'); // `supportsColor.level` → `ansiStyles.color[name]` mapping const levelMapping = [ 'ansi', 'ansi', 'ansi256', 'ansi16m', ]; const styles = Object.create(null); const applyOptions = (object, options = {}) => { if (options.level && !(Number.isInteger(options.level) && options.level >= 0 && options.level <= 3)) { throw new Error('The `level` option should be an integer from 0 to 3'); } // Detect level if not set manually const colorLevel = stdoutColor ? stdoutColor.level : 0; object.level = options.level === undefined ? 
colorLevel : options.level; }; const chalkFactory = options => { const chalk = (...strings) => strings.join(' '); applyOptions(chalk, options); Object.setPrototypeOf(chalk, createChalk.prototype); return chalk; }; function createChalk(options) { return chalkFactory(options); } Object.setPrototypeOf(createChalk.prototype, Function.prototype); for (const [styleName, style] of Object.entries(ansiStyles)) { styles[styleName] = { get() { const builder = createBuilder(this, createStyler(style.open, style.close, this[STYLER]), this[IS_EMPTY]); Object.defineProperty(this, styleName, {value: builder}); return builder; }, }; } styles.visible = { get() { const builder = createBuilder(this, this[STYLER], true); Object.defineProperty(this, 'visible', {value: builder}); return builder; }, }; const getModelAnsi = (model, level, type, ...arguments_) => { if (model === 'rgb') { if (level === 'ansi16m') { return ansiStyles[type].ansi16m(...arguments_); } if (level === 'ansi256') { return ansiStyles[type].ansi256(ansiStyles.rgbToAnsi256(...arguments_)); } return ansiStyles[type].ansi(ansiStyles.rgbToAnsi(...arguments_)); } if (model === 'hex') { return getModelAnsi('rgb', level, type, ...ansiStyles.hexToRgb(...arguments_)); } return ansiStyles[type][model](...arguments_); }; const usedModels = ['rgb', 'hex', 'ansi256']; for (const model of usedModels) { styles[model] = { get() { const {level} = this; return function (...arguments_) { const styler = createStyler(getModelAnsi(model, levelMapping[level], 'color', ...arguments_), ansiStyles.color.close, this[STYLER]); return createBuilder(this, styler, this[IS_EMPTY]); }; }, }; const bgModel = 'bg' + model[0].toUpperCase() + model.slice(1); styles[bgModel] = { get() { const {level} = this; return function (...arguments_) { const styler = createStyler(getModelAnsi(model, levelMapping[level], 'bgColor', ...arguments_), ansiStyles.bgColor.close, this[STYLER]); return createBuilder(this, styler, this[IS_EMPTY]); }; }, }; } const proto = Object.defineProperties(() => {}, { ...styles, level: { enumerable: true, get() { return this[GENERATOR].level; }, set(level) { this[GENERATOR].level = level; }, }, }); const createStyler = (open, close, parent) => { let openAll; let closeAll; if (parent === undefined) { openAll = open; closeAll = close; } else { openAll = parent.openAll + open; closeAll = close + parent.closeAll; } return { open, close, openAll, closeAll, parent, }; }; const createBuilder = (self, _styler, _isEmpty) => { // Single argument is hot path, implicit coercion is faster than anything // eslint-disable-next-line no-implicit-coercion const builder = (...arguments_) => applyStyle(builder, (arguments_.length === 1) ? ('' + arguments_[0]) : arguments_.join(' ')); // We alter the prototype because we must return a function, but there is // no way to create a function with a different prototype Object.setPrototypeOf(builder, proto); builder[GENERATOR] = self; builder[STYLER] = _styler; builder[IS_EMPTY] = _isEmpty; return builder; }; const applyStyle = (self, string) => { if (self.level <= 0 || !string) { return self[IS_EMPTY] ? '' : string; } let styler = self[STYLER]; if (styler === undefined) { return string; } const {openAll, closeAll} = styler; if (string.includes('\u001B')) { while (styler !== undefined) { // Replace any instances already present with a re-opening code // otherwise only the part of the string until said closing code // will be colored, and the rest will simply be 'plain'. 
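// (Added illustration: in a nested call such as chalk.red('a' + chalk.blue('b') + 'c'),
// the inner blue segment ends with the shared foreground close code \u001B[39m;
// the replacement below swaps that close for red's open code so the trailing
// 'c' stays red instead of falling back to the default color.)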
string = stringReplaceAll(string, styler.close, styler.open); styler = styler.parent; } } // We can move both next actions out of loop, because remaining actions in loop won't have // any/visible effect on parts we add here. Close the styling before a linebreak and reopen // after next line to fix a bleed issue on macOS: https://github.com/chalk/chalk/pull/92 const lfIndex = string.indexOf('\n'); if (lfIndex !== -1) { string = stringEncaseCRLFWithFirstIndex(string, closeAll, openAll, lfIndex); } return openAll + string + closeAll; }; Object.defineProperties(createChalk.prototype, styles); const chalk = createChalk(); createChalk({level: stderrColor ? stderrColor.level : 0}); const log = { info(...args) { console.error(chalk.green.bold("[INFO]:", ...args)); }, warn(...args) { console.error(chalk.yellow.bold("[WARN]:", ...args)); }, error(...args) { console.error(chalk.red.bold("[ERROR]:", ...args)); }, }; async function getCache(cachePath) { const res = new Map(); try { if (!fs.existsSync(cachePath)) { return res; } const cache = await fs.promises.readFile(cachePath, { encoding: "utf8" }); const cacheArr = JSON.parse(cache); for (const key of Object.keys(cacheArr)) { res.set(key, cacheArr[key]); } } catch (error) { log.error(`read cache file fail ! cachePath:${cachePath}, error:${error}`); } return res; } async function writeCache(cachePath, caches) { const cacheObj = {}; for (const [key, value] of caches) { cacheObj[key] = value; } try { const dir = cachePath.split("/").slice(0, -1).join("/"); if (!fs.existsSync(cachePath)) { await fs.promises.mkdir(dir, { recursive: true }); } await fs.promises.writeFile(cachePath, JSON.stringify(cacheObj), { encoding: "utf8", }); } catch (error) { log.error(`write cache file fail ! cachePath:${cachePath}, error:${error}`); } } function isFunction$1(val) { return typeof val === "function"; } function isString(val) { return typeof val === "string"; } function getProjectRootPath() { return process.cwd(); } function normalizedPath(filePath) { return path.posix.normalize(filePath).replace(/\\/g, "/"); } function getUniqueId() { return Math.random().toString(36).slice(2, 10); } const nodeImgRelativePathRegex = /^[^?hpst].+\.(png|jpg|jpeg|svg|gif)$/im; const imgRelativePathRegex = /(?!https?:\/\/)[^(]*\.(png|jpg|jpeg|svg|gif)/gm; const articleUniqueIdRegex = /id:\s+(\w+)$/im; function createVisitor(tree) { return function visitor(testOrVisitor, visitorOrReverse, maybeReverse) { let reverse; let vt; let test; if (typeof testOrVisitor === "function" && typeof visitorOrReverse !== "function") { test = undefined; vt = testOrVisitor; reverse = visitorOrReverse; } else { test = testOrVisitor; vt = visitorOrReverse; reverse = maybeReverse; } visit(tree, test, vt, reverse); }; } function pickRelativeImgNode(visit) { const matchNodes = []; visit("image", async (_node, _index, parent) => { let { url } = _node; if (url) { matchNodes.push({ node: _node, parent: parent }); url = decodeURIComponent(url); const regex = nodeImgRelativePathRegex; if (regex.test(url)) { matchNodes.push({ node: _node, parent: parent, }); } } }); return matchNodes; } function replaceImgUrlToCDN(visit, cdn_prefix, res_domain) { const regex = new RegExp(`/https://(${res_domain})/(.*?)/(.*?)/(.*?)(.png|.jpg|jpeg|svg|jif)`, "im"); visit("image", (node) => { if (node.url && regex.test(node.url)) { regex.lastIndex = 0; const match = regex.exec(node.url); const [, , p3, p4, p5, p6] = match; const cdnUrl = `${cdn_prefix}/${p3}/${p4}@${p5}${p6}`; node.url = cdnUrl; } }); } function pickTagNode(visit) { 
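    // (Added sketch of the shape this visitor appears to expect, for illustration:
    // a paragraph whose text is exactly "tags:" followed by a bullet list, e.g.
    //
    //   tags:
    //
    //   - javascript
    //   - node
    //
    // in the source markdown; each list item's first text value is collected,
    // so the call would return ['javascript', 'node'].)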
const tags = []; visit("list", (_node, index, parent) => { const node = _node; const prevSibling = parent?.children[Number(index) - 1]; if (prevSibling && prevSibling?.children && prevSibling.children[0].value === "tags:") { node.children.forEach((child) => { tags.push(child.children[0].children[0].value); }); return true; } }); return tags; } /** * Remove article description part * @param visit */ function removeArticleDescPart(visit) { visit("heading", (_node, _index, parent) => { const node = _node; if (parent && node.depth === 1) { parent.children.splice(0, (_index ?? 0) + 1); return true; } }); } function removeTitle(visit) { visit("heading", (_node, _index, parent) => { const node = _node; if (parent && node.depth === 1) { parent.children.splice(0, (_index ?? 0) + 1); return true; } }); } class PostMapRecorder { filePath; postMapRecords; constructor(filePath) { this.filePath = filePath; this.postMapRecords = this.getPostMapRecord(); } getPostMapRecord() { let postMapRecords = {}; if (fs.existsSync(this.filePath)) { postMapRecords = JSON.parse(fs.readFileSync(this.filePath, { encoding: "utf8" })); } return this.convertRecordStructure(postMapRecords); } getPostRecord(articleUniqueID) { if (!articleUniqueID) { return null; } if (!this.postMapRecords[articleUniqueID]) { this.postMapRecords[articleUniqueID] = {}; } return this.postMapRecords[articleUniqueID]; } convertRecordStructure(postMapRecords) { const newPostMapRecords = {}; for (const articleUniqueID in postMapRecords) { if (postMapRecords[articleUniqueID] && !isString(postMapRecords[articleUniqueID])) { newPostMapRecords[articleUniqueID] = {}; for (const platformName of Object.keys(postMapRecords[articleUniqueID])) { newPostMapRecords[articleUniqueID][platformName] = { k: postMapRecords[articleUniqueID][platformName], }; } } else { newPostMapRecords[articleUniqueID] = this.decode(postMapRecords[articleUniqueID]); } } return newPostMapRecords; } decode(record) { const newRecord = {}; if (record) { const platformArr = record.split(";"); for (const platformData of platformArr) { const params = platformData.split(","); const data = {}; for (const param of params) { const [key, value] = param.split(":"); data[key] = value; } const platformName = data["p"]; delete data["p"]; if (platformName) { newRecord[platformName] = { ...data, }; } } } return newRecord; } encode(record) { let recordStr = ""; if (record) { for (const key in record) { const paramStr = this.encodeParams(record[key]); recordStr += `p:${key}`; if (paramStr && paramStr.length > 0) { recordStr += `,${paramStr}`; } recordStr += ";"; } } return recordStr; } encodeParams(data) { let paramStr = ""; const keys = Object.keys(data); for (let i = 0; i < keys.length; i++) { const key = keys[i]; paramStr += i === keys.length - 1 ? 
`${key}:${data[key]}` : `${key}:${data[key]},`; } return paramStr; } addOrUpdate(articleUniqueID, platformName, pid) { if (!platformName) { throw new Error("Platform name is required"); } if (!this.postMapRecords[articleUniqueID]) { this.postMapRecords[articleUniqueID] = {}; } if (!this.postMapRecords[articleUniqueID][platformName]) { this.postMapRecords[articleUniqueID][platformName] = { k: pid, }; } else if (pid) { this.postMapRecords[articleUniqueID][platformName] = { ...this.postMapRecords[articleUniqueID][platformName], k: pid, }; } } getRecords() { const records = new Map(); for (const articleUniqueID of Object.keys(this.postMapRecords)) { const record = this.encode(this.postMapRecords[articleUniqueID]); records.set(articleUniqueID, record); } return records.size > 0 ? Object.fromEntries(records.entries()) : null; } solidToNative() { const records = this.getRecords(); if (records) { fs.writeFileSync(this.filePath, JSON.stringify(records, null, 2), { encoding: "utf8" }); } } } // This file was generated. Do not modify manually! var astralIdentifierCodes = [509, 0, 227, 0, 150, 4, 294, 9, 1368, 2, 2, 1, 6, 3, 41, 2, 5, 0, 166, 1, 574, 3, 9, 9, 370, 1, 81, 2, 71, 10, 50, 3, 123, 2, 54, 14, 32, 10, 3, 1, 11, 3, 46, 10, 8, 0, 46, 9, 7, 2, 37, 13, 2, 9, 6, 1, 45, 0, 13, 2, 49, 13, 9, 3, 2, 11, 83, 11, 7, 0, 3, 0, 158, 11, 6, 9, 7, 3, 56, 1, 2, 6, 3, 1, 3, 2, 10, 0, 11, 1, 3, 6, 4, 4, 193, 17, 10, 9, 5, 0, 82, 19, 13, 9, 214, 6, 3, 8, 28, 1, 83, 16, 16, 9, 82, 12, 9, 9, 84, 14, 5, 9, 243, 14, 166, 9, 71, 5, 2, 1, 3, 3, 2, 0, 2, 1, 13, 9, 120, 6, 3, 6, 4, 0, 29, 9, 41, 6, 2, 3, 9, 0, 10, 10, 47, 15, 406, 7, 2, 7, 17, 9, 57, 21, 2, 13, 123, 5, 4, 0, 2, 1, 2, 6, 2, 0, 9, 9, 49, 4, 2, 1, 2, 4, 9, 9, 330, 3, 10, 1, 2, 0, 49, 6, 4, 4, 14, 9, 5351, 0, 7, 14, 13835, 9, 87, 9, 39, 4, 60, 6, 26, 9, 1014, 0, 2, 54, 8, 3, 82, 0, 12, 1, 19628, 1, 4706, 45, 3, 22, 543, 4, 4, 5, 9, 7, 3, 6, 31, 3, 149, 2, 1418, 49, 513, 54, 5, 49, 9, 0, 15, 0, 23, 4, 2, 14, 1361, 6, 2, 16, 3, 6, 2, 1, 2, 4, 101, 0, 161, 6, 10, 9, 357, 0, 62, 13, 499, 13, 983, 6, 110, 6, 6, 9, 4759, 9, 787719, 239]; // This file was generated. Do not modify manually! 
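// (Added explanatory note: both astral arrays here are run-length encoded as
// alternating gap/length pairs relative to 0x10000 and are walked by
// isInAstralSet further below. For example, the first pair of
// astralIdentifierCodes above, [509, 0], encodes the single code point
// 0x10000 + 509 = U+101FD, a combining mark that may continue an identifier.)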
var astralIdentifierStartCodes = [0, 11, 2, 25, 2, 18, 2, 1, 2, 14, 3, 13, 35, 122, 70, 52, 268, 28, 4, 48, 48, 31, 14, 29, 6, 37, 11, 29, 3, 35, 5, 7, 2, 4, 43, 157, 19, 35, 5, 35, 5, 39, 9, 51, 13, 10, 2, 14, 2, 6, 2, 1, 2, 10, 2, 14, 2, 6, 2, 1, 68, 310, 10, 21, 11, 7, 25, 5, 2, 41, 2, 8, 70, 5, 3, 0, 2, 43, 2, 1, 4, 0, 3, 22, 11, 22, 10, 30, 66, 18, 2, 1, 11, 21, 11, 25, 71, 55, 7, 1, 65, 0, 16, 3, 2, 2, 2, 28, 43, 28, 4, 28, 36, 7, 2, 27, 28, 53, 11, 21, 11, 18, 14, 17, 111, 72, 56, 50, 14, 50, 14, 35, 349, 41, 7, 1, 79, 28, 11, 0, 9, 21, 43, 17, 47, 20, 28, 22, 13, 52, 58, 1, 3, 0, 14, 44, 33, 24, 27, 35, 30, 0, 3, 0, 9, 34, 4, 0, 13, 47, 15, 3, 22, 0, 2, 0, 36, 17, 2, 24, 20, 1, 64, 6, 2, 0, 2, 3, 2, 14, 2, 9, 8, 46, 39, 7, 3, 1, 3, 21, 2, 6, 2, 1, 2, 4, 4, 0, 19, 0, 13, 4, 159, 52, 19, 3, 21, 2, 31, 47, 21, 1, 2, 0, 185, 46, 42, 3, 37, 47, 21, 0, 60, 42, 14, 0, 72, 26, 38, 6, 186, 43, 117, 63, 32, 7, 3, 0, 3, 7, 2, 1, 2, 23, 16, 0, 2, 0, 95, 7, 3, 38, 17, 0, 2, 0, 29, 0, 11, 39, 8, 0, 22, 0, 12, 45, 20, 0, 19, 72, 264, 8, 2, 36, 18, 0, 50, 29, 113, 6, 2, 1, 2, 37, 22, 0, 26, 5, 2, 1, 2, 31, 15, 0, 328, 18, 16, 0, 2, 12, 2, 33, 125, 0, 80, 921, 103, 110, 18, 195, 2637, 96, 16, 1071, 18, 5, 4026, 582, 8634, 568, 8, 30, 18, 78, 18, 29, 19, 47, 17, 3, 32, 20, 6, 18, 689, 63, 129, 74, 6, 0, 67, 12, 65, 1, 2, 0, 29, 6135, 9, 1237, 43, 8, 8936, 3, 2, 6, 2, 1, 2, 290, 16, 0, 30, 2, 3, 0, 15, 3, 9, 395, 2309, 106, 6, 12, 4, 8, 8, 9, 5991, 84, 2, 70, 2, 1, 3, 0, 3, 1, 3, 3, 2, 11, 2, 0, 2, 6, 2, 64, 2, 3, 3, 7, 2, 6, 2, 27, 2, 3, 2, 4, 2, 0, 4, 6, 2, 339, 3, 24, 2, 24, 2, 30, 2, 24, 2, 30, 2, 24, 2, 30, 2, 24, 2, 30, 2, 24, 2, 7, 1845, 30, 7, 5, 262, 61, 147, 44, 11, 6, 17, 0, 322, 29, 19, 43, 485, 27, 757, 6, 2, 3, 2, 1, 2, 14, 2, 196, 60, 67, 8, 0, 1205, 3, 2, 26, 2, 1, 2, 0, 3, 0, 2, 9, 2, 3, 2, 0, 2, 0, 7, 0, 5, 0, 2, 0, 2, 0, 2, 2, 2, 1, 2, 0, 3, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 3, 3, 2, 6, 2, 3, 2, 3, 2, 0, 2, 9, 2, 16, 6, 2, 2, 4, 2, 16, 4421, 42719, 33, 4153, 7, 221, 3, 5761, 15, 7472, 16, 621, 2467, 541, 1507, 4938, 6, 4191]; // This file was generated. Do not modify manually! 
var nonASCIIidentifierChars = "\u200c\u200d\xb7\u0300-\u036f\u0387\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u064b-\u0669\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7\u06e8\u06ea-\u06ed\u06f0-\u06f9\u0711\u0730-\u074a\u07a6-\u07b0\u07c0-\u07c9\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u0898-\u089f\u08ca-\u08e1\u08e3-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09cb-\u09cd\u09d7\u09e2\u09e3\u09e6-\u09ef\u09fe\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2\u0ae3\u0ae6-\u0aef\u0afa-\u0aff\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b55-\u0b57\u0b62\u0b63\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c00-\u0c04\u0c3c\u0c3e-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c66-\u0c6f\u0c81-\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2\u0ce3\u0ce6-\u0cef\u0cf3\u0d00-\u0d03\u0d3b\u0d3c\u0d3e-\u0d44\u0d46-\u0d48\u0d4a-\u0d4d\u0d57\u0d62\u0d63\u0d66-\u0d6f\u0d81-\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2\u0df3\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0e50-\u0e59\u0eb1\u0eb4-\u0ebc\u0ec8-\u0ece\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e\u0f3f\u0f71-\u0f84\u0f86\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102b-\u103e\u1040-\u1049\u1056-\u1059\u105e-\u1060\u1062-\u1064\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u1369-\u1371\u1712-\u1715\u1732-\u1734\u1752\u1753\u1772\u1773\u17b4-\u17d3\u17dd\u17e0-\u17e9\u180b-\u180d\u180f-\u1819\u18a9\u1920-\u192b\u1930-\u193b\u1946-\u194f\u19d0-\u19da\u1a17-\u1a1b\u1a55-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1ab0-\u1abd\u1abf-\u1ace\u1b00-\u1b04\u1b34-\u1b44\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1b82\u1ba1-\u1bad\u1bb0-\u1bb9\u1be6-\u1bf3\u1c24-\u1c37\u1c40-\u1c49\u1c50-\u1c59\u1cd0-\u1cd2\u1cd4-\u1ce8\u1ced\u1cf4\u1cf7-\u1cf9\u1dc0-\u1dff\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302f\u3099\u309a\u30fb\ua620-\ua629\ua66f\ua674-\ua67d\ua69e\ua69f\ua6f0\ua6f1\ua802\ua806\ua80b\ua823-\ua827\ua82c\ua880\ua881\ua8b4-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f1\ua8ff-\ua909\ua926-\ua92d\ua947-\ua953\ua980-\ua983\ua9b3-\ua9c0\ua9d0-\ua9d9\ua9e5\ua9f0-\ua9f9\uaa29-\uaa36\uaa43\uaa4c\uaa4d\uaa50-\uaa59\uaa7b-\uaa7d\uaab0\uaab2-\uaab4\uaab7\uaab8\uaabe\uaabf\uaac1\uaaeb-\uaaef\uaaf5\uaaf6\uabe3-\uabea\uabec\uabed\uabf0-\uabf9\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f\uff65"; // This file was generated. Do not modify manually! 
var nonASCIIidentifierStartChars = "\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u0870-\u0887\u0889-\u088e\u08a0-\u08c9\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u09fc\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0af9\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c5d\u0c60\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cdd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d04-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e86-\u0e8a\u0e8c-\u0ea3\u0ea5\u0ea7-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u1711\u171f-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4c\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf3\u1cf5\u1cf6\u1cfa\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309b-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31bf\u31f0-\u31ff\u3400-\u4dbf\u4e00-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7ca\ua7d0\ua7d1\ua7d3\ua7d5-\ua7d9\ua7f2-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua
873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab69\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc"; // These are a run-length and offset encoded representation of the // >0xffff code points that are a valid part of identifiers. The // offset starts at 0x10000, and each pair of numbers represents an // offset to the next range, and then a size of the range. // Reserved word lists for various dialects of the language var reservedWords = { 3: "abstract boolean byte char class double enum export extends final float goto implements import int interface long native package private protected public short static super synchronized throws transient volatile", 5: "class enum extends super const export import", 6: "enum", strict: "implements interface let package private protected public static yield", strictBind: "eval arguments" }; // And the keywords var ecma5AndLessKeywords = "break case catch continue debugger default do else finally for function if return switch throw try var while with null true false instanceof typeof void delete new in this"; var keywords$1 = { 5: ecma5AndLessKeywords, "5module": ecma5AndLessKeywords + " export import", 6: ecma5AndLessKeywords + " const class extends export import super" }; var keywordRelationalOperator = /^in(stanceof)?$/; // ## Character categories var nonASCIIidentifierStart = new RegExp("[" + nonASCIIidentifierStartChars + "]"); var nonASCIIidentifier = new RegExp("[" + nonASCIIidentifierStartChars + nonASCIIidentifierChars + "]"); // This has a complexity linear to the value of the code. The // assumption is that looking up astral identifier characters is // rare. function isInAstralSet(code, set) { var pos = 0x10000; for (var i = 0; i < set.length; i += 2) { pos += set[i]; if (pos > code) { return false } pos += set[i + 1]; if (pos >= code) { return true } } return false } // Test whether a given character code starts an identifier. function isIdentifierStart(code, astral) { if (code < 65) { return code === 36 } if (code < 91) { return true } if (code < 97) { return code === 95 } if (code < 123) { return true } if (code <= 0xffff) { return code >= 0xaa && nonASCIIidentifierStart.test(String.fromCharCode(code)) } if (astral === false) { return false } return isInAstralSet(code, astralIdentifierStartCodes) } // Test whether a given character is part of an identifier. 
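// (Added worked examples: isIdentifierStart(36) is true because '$' (code 36)
// may begin an identifier, while isIdentifierStart(49) is false because the
// digit '1' cannot; the check below, isIdentifierChar, additionally accepts
// digits, so isIdentifierChar(49) is true.)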
function isIdentifierChar(code, astral) { if (code < 48) { return code === 36 } if (code < 58) { return true } if (code < 65) { return false } if (code < 91) { return true } if (code < 97) { return code === 95 } if (code < 123) { return true } if (code <= 0xffff) { return code >= 0xaa && nonASCIIidentifier.test(String.fromCharCode(code)) } if (astral === false) { return false } return isInAstralSet(code, astralIdentifierStartCodes) || isInAstralSet(code, astralIdentifierCodes) } // ## Token types // The assignment of fine-grained, information-carrying type objects // allows the tokenizer to store the information it has about a // token in a way that is very cheap for the parser to look up. // All token type variables start with an underscore, to make them // easy to recognize. // The `beforeExpr` property is used to disambiguate between regular // expressions and divisions. It is set on all token types that can // be followed by an expression (thus, a slash after them would be a // regular expression). // // The `startsExpr` property is used to check if the token ends a // `yield` expression. It is set on all token types that either can // directly start an expression (like a quotation mark) or can // continue an expression (like the body of a string). // // `isLoop` marks a keyword as starting a loop, which is important // to know when parsing a label, in order to allow or disallow // continue jumps to that label. var TokenType = function TokenType(label, conf) { if ( conf === void 0 ) conf = {}; this.label = label; this.keyword = conf.keyword; this.beforeExpr = !!conf.beforeExpr; this.startsExpr = !!conf.startsExpr; this.isLoop = !!conf.isLoop; this.isAssign = !!conf.isAssign; this.prefix = !!conf.prefix; this.postfix = !!conf.postfix; this.binop = conf.binop || null; this.updateContext = null; }; function binop(name, prec) { return new TokenType(name, {beforeExpr: true, binop: prec}) } var beforeExpr = {beforeExpr: true}, startsExpr = {startsExpr: true}; // Map keyword names to token types. var keywords = {}; // Succinct definitions of keyword token types function kw(name, options) { if ( options === void 0 ) options = {}; options.keyword = name; return keywords[name] = new TokenType(name, options) } var types$1 = { num: new TokenType("num", startsExpr), regexp: new TokenType("regexp", startsExpr), string: new TokenType("string", startsExpr), name: new TokenType("name", startsExpr), privateId: new TokenType("privateId", startsExpr), eof: new TokenType("eof"), // Punctuation token types. bracketL: new TokenType("[", {beforeExpr: true, startsExpr: true}), bracketR: new TokenType("]"), braceL: new TokenType("{", {beforeExpr: true, startsExpr: true}), braceR: new TokenType("}"), parenL: new TokenType("(", {beforeExpr: true, startsExpr: true}), parenR: new TokenType(")"), comma: new TokenType(",", beforeExpr), semi: new TokenType(";", beforeExpr), colon: new TokenType(":", beforeExpr), dot: new TokenType("."), question: new TokenType("?", beforeExpr), questionDot: new TokenType("?."), arrow: new TokenType("=>", beforeExpr), template: new TokenType("template"), invalidTemplate: new TokenType("invalidTemplate"), ellipsis: new TokenType("...", beforeExpr), backQuote: new TokenType("`", startsExpr), dollarBraceL: new TokenType("${", {beforeExpr: true, startsExpr: true}), // Operators. These carry several kinds of properties to help the // parser use them properly (the presence of these properties is // what categorizes them as operators). 
// // `binop`, when present, specifies that this operator is a binary // operator, and will refer to its precedence. // // `prefix` and `postfix` mark the operator as a prefix or postfix // unary operator. // // `isAssign` marks all of `=`, `+=`, `-=` etcetera, which act as // binary operators with a very low precedence, that should result // in AssignmentExpression nodes. eq: new TokenType("=", {beforeExpr: true, isAssign: true}), assign: new TokenType("_=", {beforeExpr: true, isAssign: true}), incDec: new TokenType("++/--", {prefix: true, postfix: true, startsExpr: true}), prefix: new TokenType("!/~", {beforeExpr: true, prefix: true, startsExpr: true}), logicalOR: binop("||", 1), logicalAND: binop("&&", 2), bitwiseOR: binop("|", 3), bitwiseXOR: binop("^", 4), bitwiseAND: binop("&", 5), equality: binop("==/!=/===/!==", 6), relational: binop("/<=/>=", 7), bitShift: binop("<>/>>>", 8), plusMin: new TokenType("+/-", {beforeExpr: true, binop: 9, prefix: true, startsExpr: true}), modulo: binop("%", 10), star: binop("*", 10), slash: binop("/", 10), starstar: new TokenType("**", {beforeExpr: true}), coalesce: binop("??", 1), // Keyword token types. _break: kw("break"), _case: kw("case", beforeExpr), _catch: kw("catch"), _continue: kw("continue"), _debugger: kw("debugger"), _default: kw("default", beforeExpr), _do: kw("do", {isLoop: true, beforeExpr: true}), _else: kw("else", beforeExpr), _finally: kw("finally"), _for: kw("for", {isLoop: true}), _function: kw("function", startsExpr), _if: kw("if"), _return: kw("return", beforeExpr), _switch: kw("switch"), _throw: kw("throw", beforeExpr), _try: kw("try"), _var: kw("var"), _const: kw("const"), _while: kw("while", {isLoop: true}), _with: kw("with"), _new: kw("new", {beforeExpr: true, startsExpr: true}), _this: kw("this", startsExpr), _super: kw("super", startsExpr), _class: kw("class", startsExpr), _extends: kw("extends", beforeExpr), _export: kw("export"), _import: kw("import", startsExpr), _null: kw("null", startsExpr), _true: kw("true", startsExpr), _false: kw("false", startsExpr), _in: kw("in", {beforeExpr: true, binop: 7}), _instanceof: kw("instanceof", {beforeExpr: true, binop: 7}), _typeof: kw("typeof", {beforeExpr: true, prefix: true, startsExpr: true}), _void: kw("void", {beforeExpr: true, prefix: true, startsExpr: true}), _delete: kw("delete", {beforeExpr: true, prefix: true, startsExpr: true}) }; // Matches a whole line break (where CRLF is considered a single // line break). Used to count lines. var lineBreak = /\r\n?|\n|\u2028|\u2029/; var lineBreakG = new RegExp(lineBreak.source, "g"); function isNewLine(code) { return code === 10 || code === 13 || code === 0x2028 || code === 0x2029 } function nextLineBreak(code, from, end) { if ( end === void 0 ) end = code.length; for (var i = from; i < end; i++) { var next = code.charCodeAt(i); if (isNewLine(next)) { return i < end - 1 && next === 13 && code.charCodeAt(i + 1) === 10 ? 
i + 2 : i + 1 } } return -1 } var nonASCIIwhitespace = /[\u1680\u2000-\u200a\u202f\u205f\u3000\ufeff]/; var skipWhiteSpace = /(?:\s|\/\/.*|\/\*[^]*?\*\/)*/g; var ref = Object.prototype; var hasOwnProperty$9 = ref.hasOwnProperty; var toString = ref.toString; var hasOwn = Object.hasOwn || (function (obj, propName) { return ( hasOwnProperty$9.call(obj, propName) ); }); var isArray$1 = Array.isArray || (function (obj) { return ( toString.call(obj) === "[object Array]" ); }); var regexpCache = Object.create(null); function wordsRegexp(words) { return regexpCache[words] || (regexpCache[words] = new RegExp("^(?:" + words.replace(/ /g, "|") + ")$")) } function codePointToString(code) { // UTF-16 Decoding if (code <= 0xFFFF) { return String.fromCharCode(code) } code -= 0x10000; return String.fromCharCode((code >> 10) + 0xD800, (code & 1023) + 0xDC00) } var loneSurrogate = /(?:[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])/; // These are used when `options.locations` is on, for the // `startLoc` and `endLoc` properties. var Position = function Position(line, col) { this.line = line; this.column = col; }; Position.prototype.offset = function offset (n) { return new Position(this.line, this.column + n) }; var SourceLocation = function SourceLocation(p, start, end) { this.start = start; this.end = end; if (p.sourceFile !== null) { this.source = p.sourceFile; } }; // The `getLineInfo` function is mostly useful when the // `locations` option is off (for performance reasons) and you // want to find the line/column position for a given character // offset. `input` should be the code string that the offset refers // into. function getLineInfo(input, offset) { for (var line = 1, cur = 0;;) { var nextBreak = nextLineBreak(input, cur, offset); if (nextBreak < 0) { return new Position(line, offset - cur) } ++line; cur = nextBreak; } } // A second argument must be given to configure the parser process. // These options are recognized (only `ecmaVersion` is required): var defaultOptions = { // `ecmaVersion` indicates the ECMAScript version to parse. Must be // either 3, 5, 6 (or 2015), 7 (2016), 8 (2017), 9 (2018), 10 // (2019), 11 (2020), 12 (2021), 13 (2022), 14 (2023), or `"latest"` // (the latest version the library supports). This influences // support for strict mode, the set of reserved words, and support // for new syntax features. ecmaVersion: null, // `sourceType` indicates the mode the code should be parsed in. // Can be either `"script"` or `"module"`. This influences global // strict mode and parsing of `import` and `export` declarations. sourceType: "script", // `onInsertedSemicolon` can be a callback that will be called when // a semicolon is automatically inserted. It will be passed the // position of the inserted semicolon as an offset, and if // `locations` is enabled, it is given the location as a `{line, // column}` object as second argument. onInsertedSemicolon: null, // `onTrailingComma` is similar to `onInsertedSemicolon`, but for // trailing commas. onTrailingComma: null, // By default, reserved words are only enforced if ecmaVersion >= 5. // Set `allowReserved` to a boolean value to explicitly turn this on // an off. When this option has the value "never", reserved words // and keywords can also not be used as property names. allowReserved: null, // When enabled, a return at the top level is not considered an // error. 
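  // (Added usage sketch: e.g. Parser.parse('return 1', {ecmaVersion: 2020,
  // allowReturnOutsideFunction: true}) parses cleanly, whereas the default of
  // false below makes a top-level return a syntax error.)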
allowReturnOutsideFunction: false, // When enabled, import/export statements are not constrained to // appearing at the top of the program, and an import.meta expression // in a script isn't considered an error. allowImportExportEverywhere: false, // By default, await identifiers are allowed to appear at the top-level scope only if ecmaVersion >= 2022. // When enabled, await identifiers are allowed to appear at the top-level scope, // but they are still not allowed in non-async functions. allowAwaitOutsideFunction: null, // When enabled, super identifiers are not constrained to // appearing in methods and do not raise an error when they appear elsewhere. allowSuperOutsideMethod: null, // When enabled, hashbang directive in the beginning of file is // allowed and treated as a line comment. Enabled by default when // `ecmaVersion` >= 2023. allowHashBang: false, // By default, the parser will verify that private properties are // only used in places where they are valid and have been declared. // Set this to false to turn such checks off. checkPrivateFields: true, // When `locations` is on, `loc` properties holding objects with // `start` and `end` properties in `{line, column}` form (with // line being 1-based and column 0-based) will be attached to the // nodes. locations: false, // A function can be passed as `onToken` option, which will // cause Acorn to call that function with object in the same // format as tokens returned from `tokenizer().getToken()`. Note // that you are not allowed to call the parser from the // callback—that will corrupt its internal state. onToken: null, // A function can be passed as `onComment` option, which will // cause Acorn to call that function with `(block, text, start, // end)` parameters whenever a comment is skipped. `block` is a // boolean indicating whether this is a block (`/* */`) comment, // `text` is the content of the comment, and `start` and `end` are // character offsets that denote the start and end of the comment. // When the `locations` option is on, two more parameters are // passed, the full `{line, column}` locations of the start and // end of the comments. Note that you are not allowed to call the // parser from the callback—that will corrupt its internal state. // When this option has an array as value, objects representing the // comments are pushed to it. onComment: null, // Nodes have their start and end characters offsets recorded in // `start` and `end` properties (directly on the node, rather than // the `loc` object, which holds line/column data. To also add a // [semi-standardized][range] `range` property holding a `[start, // end]` array with the same numbers, set the `ranges` option to // `true`. // // [range]: https://bugzilla.mozilla.org/show_bug.cgi?id=745678 ranges: false, // It is possible to parse multiple files into a single AST by // passing the tree produced by parsing the first file as // `program` option in subsequent parses. This will add the // toplevel forms of the parsed file to the `Program` (top) node // of an existing parse tree. program: null, // When `locations` is on, you can pass this to record the source // file in every node's `loc` object. sourceFile: null, // This value, if given, is stored in every node, whether // `locations` is on or off. 
directSourceFile: null, // When enabled, parenthesized expressions are represented by // (non-standard) ParenthesizedExpression nodes preserveParens: false }; // Interpret and default an options object var warnedAboutEcmaVersion = false; function getOptions(opts) { var options = {}; for (var opt in defaultOptions) { options[opt] = opts && hasOwn(opts, opt) ? opts[opt] : defaultOptions[opt]; } if (options.ecmaVersion === "latest") { options.ecmaVersion = 1e8; } else if (options.ecmaVersion == null) { if (!warnedAboutEcmaVersion && typeof console === "object" && console.warn) { warnedAboutEcmaVersion = true; console.warn("Since Acorn 8.0.0, options.ecmaVersion is required.\nDefaulting to 2020, but this will stop working in the future."); } options.ecmaVersion = 11; } else if (options.ecmaVersion >= 2015) { options.ecmaVersion -= 2009; } if (options.allowReserved == null) { options.allowReserved = options.ecmaVersion < 5; } if (!opts || opts.allowHashBang == null) { options.allowHashBang = options.ecmaVersion >= 14; } if (isArray$1(options.onToken)) { var tokens = options.onToken; options.onToken = function (token) { return tokens.push(token); }; } if (isArray$1(options.onComment)) { options.onComment = pushComment(options, options.onComment); } return options } function pushComment(options, array) { return function(block, text, start, end, startLoc, endLoc) { var comment = { type: block ? "Block" : "Line", value: text, start: start, end: end }; if (options.locations) { comment.loc = new SourceLocation(this, startLoc, endLoc); } if (options.ranges) { comment.range = [start, end]; } array.push(comment); } } // Each scope gets a bitset that may contain these flags var SCOPE_TOP = 1, SCOPE_FUNCTION = 2, SCOPE_ASYNC = 4, SCOPE_GENERATOR = 8, SCOPE_ARROW = 16, SCOPE_SIMPLE_CATCH = 32, SCOPE_SUPER = 64, SCOPE_DIRECT_SUPER = 128, SCOPE_CLASS_STATIC_BLOCK = 256, SCOPE_VAR = SCOPE_TOP | SCOPE_FUNCTION | SCOPE_CLASS_STATIC_BLOCK; function functionFlags(async, generator) { return SCOPE_FUNCTION | (async ? SCOPE_ASYNC : 0) | (generator ? SCOPE_GENERATOR : 0) } // Used in checkLVal* and declareName to determine the type of a binding var BIND_NONE = 0, // Not a binding BIND_VAR = 1, // Var-style binding BIND_LEXICAL = 2, // Let- or const-style binding BIND_FUNCTION = 3, // Function declaration BIND_SIMPLE_CATCH = 4, // Simple (identifier pattern) catch binding BIND_OUTSIDE = 5; // Special case for function names as bound inside the function var Parser = function Parser(options, input, startPos) { this.options = options = getOptions(options); this.sourceFile = options.sourceFile; this.keywords = wordsRegexp(keywords$1[options.ecmaVersion >= 6 ? 6 : options.sourceType === "module" ? "5module" : 5]); var reserved = ""; if (options.allowReserved !== true) { reserved = reservedWords[options.ecmaVersion >= 6 ? 6 : options.ecmaVersion === 5 ? 5 : 3]; if (options.sourceType === "module") { reserved += " await"; } } this.reservedWords = wordsRegexp(reserved); var reservedStrict = (reserved ? reserved + " " : "") + reservedWords.strict; this.reservedWordsStrict = wordsRegexp(reservedStrict); this.reservedWordsStrictBind = wordsRegexp(reservedStrict + " " + reservedWords.strictBind); this.input = String(input); // Used to signal to callers of `readWord1` whether the word // contained any escape sequences. This is needed because words with // escape sequences must not be interpreted as keywords. this.containsEsc = false; // Set up token state // The current position of the tokenizer in the input. 
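  // (Added worked example: for input 'a;\nb;' with startPos 3, the branch below
  // sets pos = 3, lineStart = 3 (one past the last preceding '\n') and
  // curLine = 2, since one line break precedes the start position.)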
if (startPos) { this.pos = startPos; this.lineStart = this.input.lastIndexOf("\n", startPos - 1) + 1; this.curLine = this.input.slice(0, this.lineStart).split(lineBreak).length; } else { this.pos = this.lineStart = 0; this.curLine = 1; } // Properties of the current token: // Its type this.type = types$1.eof; // For tokens that include more information than their type, the value this.value = null; // Its start and end offset this.start = this.end = this.pos; // And, if locations are used, the {line, column} object // corresponding to those offsets this.startLoc = this.endLoc = this.curPosition(); // Position information for the previous token this.lastTokEndLoc = this.lastTokStartLoc = null; this.lastTokStart = this.lastTokEnd = this.pos; // The context stack is used to superficially track syntactic // context to predict whether a regular expression is allowed in a // given position. this.context = this.initialContext(); this.exprAllowed = true; // Figure out if it's a module code. this.inModule = options.sourceType === "module"; this.strict = this.inModule || this.strictDirective(this.pos); // Used to signify the start of a potential arrow function this.potentialArrowAt = -1; this.potentialArrowInForAwait = false; // Positions to delayed-check that yield/await does not exist in default parameters. this.yieldPos = this.awaitPos = this.awaitIdentPos = 0; // Labels in scope. this.labels = []; // Thus-far undefined exports. this.undefinedExports = Object.create(null); // If enabled, skip leading hashbang line. if (this.pos === 0 && options.allowHashBang && this.input.slice(0, 2) === "#!") { this.skipLineComment(2); } // Scope tracking for duplicate variable names (see scope.js) this.scopeStack = []; this.enterScope(SCOPE_TOP); // For RegExp validation this.regexpState = null; // The stack of private names. // Each element has two properties: 'declared' and 'used'. // When it exited from the outermost class definition, all used private names must be declared. 
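  // (Added illustration: for `class A { #x = 1; getX() { return this.#x } }`,
  // the class body pushes a frame that records #x as declared and, once
  // `this.#x` is parsed, as used; when the outermost class is exited every
  // used private name must have a matching declaration.)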
this.privateNameStack = []; }; var prototypeAccessors = { inFunction: { configurable: true },inGenerator: { configurable: true },inAsync: { configurable: true },canAwait: { configurable: true },allowSuper: { configurable: true },allowDirectSuper: { configurable: true },treatFunctionsAsVar: { configurable: true },allowNewDotTarget: { configurable: true },inClassStaticBlock: { configurable: true } }; Parser.prototype.parse = function parse () { var node = this.options.program || this.startNode(); this.nextToken(); return this.parseTopLevel(node) }; prototypeAccessors.inFunction.get = function () { return (this.currentVarScope().flags & SCOPE_FUNCTION) > 0 }; prototypeAccessors.inGenerator.get = function () { return (this.currentVarScope().flags & SCOPE_GENERATOR) > 0 && !this.currentVarScope().inClassFieldInit }; prototypeAccessors.inAsync.get = function () { return (this.currentVarScope().flags & SCOPE_ASYNC) > 0 && !this.currentVarScope().inClassFieldInit }; prototypeAccessors.canAwait.get = function () { for (var i = this.scopeStack.length - 1; i >= 0; i--) { var scope = this.scopeStack[i]; if (scope.inClassFieldInit || scope.flags & SCOPE_CLASS_STATIC_BLOCK) { return false } if (scope.flags & SCOPE_FUNCTION) { return (scope.flags & SCOPE_ASYNC) > 0 } } return (this.inModule && this.options.ecmaVersion >= 13) || this.options.allowAwaitOutsideFunction }; prototypeAccessors.allowSuper.get = function () { var ref = this.currentThisScope(); var flags = ref.flags; var inClassFieldInit = ref.inClassFieldInit; return (flags & SCOPE_SUPER) > 0 || inClassFieldInit || this.options.allowSuperOutsideMethod }; prototypeAccessors.allowDirectSuper.get = function () { return (this.currentThisScope().flags & SCOPE_DIRECT_SUPER) > 0 }; prototypeAccessors.treatFunctionsAsVar.get = function () { return this.treatFunctionsAsVarInScope(this.currentScope()) }; prototypeAccessors.allowNewDotTarget.get = function () { var ref = this.currentThisScope(); var flags = ref.flags; var inClassFieldInit = ref.inClassFieldInit; return (flags & (SCOPE_FUNCTION | SCOPE_CLASS_STATIC_BLOCK)) > 0 || inClassFieldInit }; prototypeAccessors.inClassStaticBlock.get = function () { return (this.currentVarScope().flags & SCOPE_CLASS_STATIC_BLOCK) > 0 }; Parser.extend = function extend () { var plugins = [], len = arguments.length; while ( len-- ) plugins[ len ] = arguments[ len ]; var cls = this; for (var i = 0; i < plugins.length; i++) { cls = plugins[i](cls); } return cls }; Parser.parse = function parse (input, options) { return new this(options, input).parse() }; Parser.parseExpressionAt = function parseExpressionAt (input, pos, options) { var parser = new this(options, input, pos); parser.nextToken(); return parser.parseExpression() }; Parser.tokenizer = function tokenizer (input, options) { return new this(options, input) }; Object.defineProperties( Parser.prototype, prototypeAccessors ); var pp$9 = Parser.prototype; // ## Parser utilities var literal = /^(?:'((?:\\[^]|[^'\\])*?)'|"((?:\\[^]|[^"\\])*?)")/; pp$9.strictDirective = function(start) { if (this.options.ecmaVersion < 5) { return false } for (;;) { // Try to find string literal. 
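// For example, for `"use asm"; "use strict"; with (o) {}` this loop skips the first
// directive, matches "use strict", and the parser enters strict mode, so the later
// `with` statement is rejected.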
skipWhiteSpace.lastIndex = start; start += skipWhiteSpace.exec(this.input)[0].length; var match = literal.exec(this.input.slice(start)); if (!match) { return false } if ((match[1] || match[2]) === "use strict") { skipWhiteSpace.lastIndex = start + match[0].length; var spaceAfter = skipWhiteSpace.exec(this.input), end = spaceAfter.index + spaceAfter[0].length; var next = this.input.charAt(end); return next === ";" || next === "}" || (lineBreak.test(spaceAfter[0]) && !(/[(`.[+\-/*%<>=,?^&]/.test(next) || next === "!" && this.input.charAt(end + 1) === "=")) } start += match[0].length; // Skip semicolon, if any. skipWhiteSpace.lastIndex = start; start += skipWhiteSpace.exec(this.input)[0].length; if (this.input[start] === ";") { start++; } } }; // Predicate that tests whether the next token is of the given // type, and if yes, consumes it as a side effect. pp$9.eat = function(type) { if (this.type === type) { this.next(); return true } else { return false } }; // Tests whether parsed token is a contextual keyword. pp$9.isContextual = function(name) { return this.type === types$1.name && this.value === name && !this.containsEsc }; // Consumes contextual keyword if possible. pp$9.eatContextual = function(name) { if (!this.isContextual(name)) { return false } this.next(); return true }; // Asserts that following token is given contextual keyword. pp$9.expectContextual = function(name) { if (!this.eatContextual(name)) { this.unexpected(); } }; // Test whether a semicolon can be inserted at the current position. pp$9.canInsertSemicolon = function() { return this.type === types$1.eof || this.type === types$1.braceR || lineBreak.test(this.input.slice(this.lastTokEnd, this.start)) }; pp$9.insertSemicolon = function() { if (this.canInsertSemicolon()) { if (this.options.onInsertedSemicolon) { this.options.onInsertedSemicolon(this.lastTokEnd, this.lastTokEndLoc); } return true } }; // Consume a semicolon, or, failing that, see if we are allowed to // pretend that there is a semicolon at this position. pp$9.semicolon = function() { if (!this.eat(types$1.semi) && !this.insertSemicolon()) { this.unexpected(); } }; pp$9.afterTrailingComma = function(tokType, notNext) { if (this.type === tokType) { if (this.options.onTrailingComma) { this.options.onTrailingComma(this.lastTokStart, this.lastTokStartLoc); } if (!notNext) { this.next(); } return true } }; // Expect a token of a given type. If found, consume it, otherwise, // raise an unexpected token error. pp$9.expect = function(type) { this.eat(type) || this.unexpected(); }; // Raise an unexpected token error. pp$9.unexpected = function(pos) { this.raise(pos != null ? pos : this.start, "Unexpected token"); }; var DestructuringErrors = function DestructuringErrors() { this.shorthandAssign = this.trailingComma = this.parenthesizedAssign = this.parenthesizedBind = this.doubleProto = -1; }; pp$9.checkPatternErrors = function(refDestructuringErrors, isAssign) { if (!refDestructuringErrors) { return } if (refDestructuringErrors.trailingComma > -1) { this.raiseRecoverable(refDestructuringErrors.trailingComma, "Comma is not permitted after the rest element"); } var parens = isAssign ? refDestructuringErrors.parenthesizedAssign : refDestructuringErrors.parenthesizedBind; if (parens > -1) { this.raiseRecoverable(parens, isAssign ? 
"Assigning to rvalue" : "Parenthesized pattern"); } }; pp$9.checkExpressionErrors = function(refDestructuringErrors, andThrow) { if (!refDestructuringErrors) { return false } var shorthandAssign = refDestructuringErrors.shorthandAssign; var doubleProto = refDestructuringErrors.doubleProto; if (!andThrow) { return shorthandAssign >= 0 || doubleProto >= 0 } if (shorthandAssign >= 0) { this.raise(shorthandAssign, "Shorthand property assignments are valid only in destructuring patterns"); } if (doubleProto >= 0) { this.raiseRecoverable(doubleProto, "Redefinition of __proto__ property"); } }; pp$9.checkYieldAwaitInDefaultParams = function() { if (this.yieldPos && (!this.awaitPos || this.yieldPos < this.awaitPos)) { this.raise(this.yieldPos, "Yield expression cannot be a default value"); } if (this.awaitPos) { this.raise(this.awaitPos, "Await expression cannot be a default value"); } }; pp$9.isSimpleAssignTarget = function(expr) { if (expr.type === "ParenthesizedExpression") { return this.isSimpleAssignTarget(expr.expression) } return expr.type === "Identifier" || expr.type === "MemberExpression" }; var pp$8 = Parser.prototype; // ### Statement parsing // Parse a program. Initializes the parser, reads any number of // statements, and wraps them in a Program node. Optionally takes a // `program` argument. If present, the statements will be appended // to its body instead of creating a new node. pp$8.parseTopLevel = function(node) { var exports = Object.create(null); if (!node.body) { node.body = []; } while (this.type !== types$1.eof) { var stmt = this.parseStatement(null, true, exports); node.body.push(stmt); } if (this.inModule) { for (var i = 0, list = Object.keys(this.undefinedExports); i < list.length; i += 1) { var name = list[i]; this.raiseRecoverable(this.undefinedExports[name].start, ("Export '" + name + "' is not defined")); } } this.adaptDirectivePrologue(node.body); this.next(); node.sourceType = this.options.sourceType; return this.finishNode(node, "Program") }; var loopLabel = {kind: "loop"}, switchLabel = {kind: "switch"}; pp$8.isLet = function(context) { if (this.options.ecmaVersion < 6 || !this.isContextual("let")) { return false } skipWhiteSpace.lastIndex = this.pos; var skip = skipWhiteSpace.exec(this.input); var next = this.pos + skip[0].length, nextCh = this.input.charCodeAt(next); // For ambiguous cases, determine if a LexicalDeclaration (or only a // Statement) is allowed here. If context is not empty then only a Statement // is allowed. However, `let [` is an explicit negative lookahead for // ExpressionStatement, so special-case it first. if (nextCh === 91 || nextCh === 92) { return true } // '[', '\' if (context) { return false } if (nextCh === 123 || nextCh > 0xd7ff && nextCh < 0xdc00) { return true } // '{', astral if (isIdentifierStart(nextCh, true)) { var pos = next + 1; while (isIdentifierChar(nextCh = this.input.charCodeAt(pos), true)) { ++pos; } if (nextCh === 92 || nextCh > 0xd7ff && nextCh < 0xdc00) { return true } var ident = this.input.slice(next, pos); if (!keywordRelationalOperator.test(ident)) { return true } } return false }; // check 'async [no LineTerminator here] function' // - 'async /*foo*/ function' is OK. // - 'async /*\n*/ function' is invalid. 
pp$8.isAsyncFunction = function() { if (this.options.ecmaVersion < 8 || !this.isContextual("async")) { return false } skipWhiteSpace.lastIndex = this.pos; var skip = skipWhiteSpace.exec(this.input); var next = this.pos + skip[0].length, after; return !lineBreak.test(this.input.slice(this.pos, next)) && this.input.slice(next, next + 8) === "function" && (next + 8 === this.input.length || !(isIdentifierChar(after = this.input.charCodeAt(next + 8)) || after > 0xd7ff && after < 0xdc00)) }; // Parse a single statement. // // If expecting a statement and finding a slash operator, parse a // regular expression literal. This is to handle cases like // `if (foo) /blah/.exec(foo)`, where looking at the previous token // does not help. pp$8.parseStatement = function(context, topLevel, exports) { var starttype = this.type, node = this.startNode(), kind; if (this.isLet(context)) { starttype = types$1._var; kind = "let"; } // Most types of statements are recognized by the keyword they // start with. Many are trivial to parse, some require a bit of // complexity. switch (starttype) { case types$1._break: case types$1._continue: return this.parseBreakContinueStatement(node, starttype.keyword) case types$1._debugger: return this.parseDebuggerStatement(node) case types$1._do: return this.parseDoStatement(node) case types$1._for: return this.parseForStatement(node) case types$1._function: // Function as sole body of either an if statement or a labeled statement // works, but not when it is part of a labeled statement that is the sole // body of an if statement. if ((context && (this.strict || context !== "if" && context !== "label")) && this.options.ecmaVersion >= 6) { this.unexpected(); } return this.parseFunctionStatement(node, false, !context) case types$1._class: if (context) { this.unexpected(); } return this.parseClass(node, true) case types$1._if: return this.parseIfStatement(node) case types$1._return: return this.parseReturnStatement(node) case types$1._switch: return this.parseSwitchStatement(node) case types$1._throw: return this.parseThrowStatement(node) case types$1._try: return this.parseTryStatement(node) case types$1._const: case types$1._var: kind = kind || this.value; if (context && kind !== "var") { this.unexpected(); } return this.parseVarStatement(node, kind) case types$1._while: return this.parseWhileStatement(node) case types$1._with: return this.parseWithStatement(node) case types$1.braceL: return this.parseBlock(true, node) case types$1.semi: return this.parseEmptyStatement(node) case types$1._export: case types$1._import: if (this.options.ecmaVersion > 10 && starttype === types$1._import) { skipWhiteSpace.lastIndex = this.pos; var skip = skipWhiteSpace.exec(this.input); var next = this.pos + skip[0].length, nextCh = this.input.charCodeAt(next); if (nextCh === 40 || nextCh === 46) // '(' or '.' { return this.parseExpressionStatement(node, this.parseExpression()) } } if (!this.options.allowImportExportEverywhere) { if (!topLevel) { this.raise(this.start, "'import' and 'export' may only appear at the top level"); } if (!this.inModule) { this.raise(this.start, "'import' and 'export' may appear only with 'sourceType: module'"); } } return starttype === types$1._import ? this.parseImport(node) : this.parseExport(node, exports) // If the statement does not start with a statement keyword or a // brace, it's an ExpressionStatement or LabeledStatement. 
We // simply start parsing an expression, and afterwards, if the // next token is a colon and the expression was a simple // Identifier node, we switch to interpreting it as a label. default: if (this.isAsyncFunction()) { if (context) { this.unexpected(); } this.next(); return this.parseFunctionStatement(node, true, !context) } var maybeName = this.value, expr = this.parseExpression(); if (starttype === types$1.name && expr.type === "Identifier" && this.eat(types$1.colon)) { return this.parseLabeledStatement(node, maybeName, expr, context) } else { return this.parseExpressionStatement(node, expr) } } }; pp$8.parseBreakContinueStatement = function(node, keyword) { var isBreak = keyword === "break"; this.next(); if (this.eat(types$1.semi) || this.insertSemicolon()) { node.label = null; } else if (this.type !== types$1.name) { this.unexpected(); } else { node.label = this.parseIdent(); this.semicolon(); } // Verify that there is an actual destination to break or // continue to. var i = 0; for (; i < this.labels.length; ++i) { var lab = this.labels[i]; if (node.label == null || lab.name === node.label.name) { if (lab.kind != null && (isBreak || lab.kind === "loop")) { break } if (node.label && isBreak) { break } } } if (i === this.labels.length) { this.raise(node.start, "Unsyntactic " + keyword); } return this.finishNode(node, isBreak ? "BreakStatement" : "ContinueStatement") }; pp$8.parseDebuggerStatement = function(node) { this.next(); this.semicolon(); return this.finishNode(node, "DebuggerStatement") }; pp$8.parseDoStatement = function(node) { this.next(); this.labels.push(loopLabel); node.body = this.parseStatement("do"); this.labels.pop(); this.expect(types$1._while); node.test = this.parseParenExpression(); if (this.options.ecmaVersion >= 6) { this.eat(types$1.semi); } else { this.semicolon(); } return this.finishNode(node, "DoWhileStatement") }; // Disambiguating between a `for` and a `for`/`in` or `for`/`of` // loop is non-trivial. Basically, we have to parse the init `var` // statement or expression, disallowing the `in` operator (see // the second parameter to `parseExpression`), and then check // whether the next token is `in` or `of`. When there is no init // part (semicolon immediately after the opening parenthesis), it // is a regular `for` loop. pp$8.parseForStatement = function(node) { this.next(); var awaitAt = (this.options.ecmaVersion >= 9 && this.canAwait && this.eatContextual("await")) ? this.lastTokStart : -1; this.labels.push(loopLabel); this.enterScope(0); this.expect(types$1.parenL); if (this.type === types$1.semi) { if (awaitAt > -1) { this.unexpected(awaitAt); } return this.parseFor(node, null) } var isLet = this.isLet(); if (this.type === types$1._var || this.type === types$1._const || isLet) { var init$1 = this.startNode(), kind = isLet ? "let" : this.value; this.next(); this.parseVar(init$1, true, kind); this.finishNode(init$1, "VariableDeclaration"); if ((this.type === types$1._in || (this.options.ecmaVersion >= 6 && this.isContextual("of"))) && init$1.declarations.length === 1) { if (this.options.ecmaVersion >= 9) { if (this.type === types$1._in) { if (awaitAt > -1) { this.unexpected(awaitAt); } } else { node.await = awaitAt > -1; } } return this.parseForIn(node, init$1) } if (awaitAt > -1) { this.unexpected(awaitAt); } return this.parseFor(node, init$1) } var startsWithLet = this.isContextual("let"), isForOf = false; var containsEsc = this.containsEsc; var refDestructuringErrors = new DestructuringErrors; var initPos = this.start; var init = awaitAt > -1 ? 
this.parseExprSubscripts(refDestructuringErrors, "await") : this.parseExpression(true, refDestructuringErrors); if (this.type === types$1._in || (isForOf = this.options.ecmaVersion >= 6 && this.isContextual("of"))) { if (awaitAt > -1) { // implies `ecmaVersion >= 9` (see declaration of awaitAt) if (this.type === types$1._in) { this.unexpected(awaitAt); } node.await = true; } else if (isForOf && this.options.ecmaVersion >= 8) { if (init.start === initPos && !containsEsc && init.type === "Identifier" && init.name === "async") { this.unexpected(); } else if (this.options.ecmaVersion >= 9) { node.await = false; } } if (startsWithLet && isForOf) { this.raise(init.start, "The left-hand side of a for-of loop may not start with 'let'."); } this.toAssignable(init, false, refDestructuringErrors); this.checkLValPattern(init); return this.parseForIn(node, init) } else { this.checkExpressionErrors(refDestructuringErrors, true); } if (awaitAt > -1) { this.unexpected(awaitAt); } return this.parseFor(node, init) }; pp$8.parseFunctionStatement = function(node, isAsync, declarationPosition) { this.next(); return this.parseFunction(node, FUNC_STATEMENT | (declarationPosition ? 0 : FUNC_HANGING_STATEMENT), false, isAsync) }; pp$8.parseIfStatement = function(node) { this.next(); node.test = this.parseParenExpression(); // allow function declarations in branches, but only in non-strict mode node.consequent = this.parseStatement("if"); node.alternate = this.eat(types$1._else) ? this.parseStatement("if") : null; return this.finishNode(node, "IfStatement") }; pp$8.parseReturnStatement = function(node) { if (!this.inFunction && !this.options.allowReturnOutsideFunction) { this.raise(this.start, "'return' outside of function"); } this.next(); // In `return` (and `break`/`continue`), the keywords with // optional arguments, we eagerly look for a semicolon or the // possibility to insert one. if (this.eat(types$1.semi) || this.insertSemicolon()) { node.argument = null; } else { node.argument = this.parseExpression(); this.semicolon(); } return this.finishNode(node, "ReturnStatement") }; pp$8.parseSwitchStatement = function(node) { this.next(); node.discriminant = this.parseParenExpression(); node.cases = []; this.expect(types$1.braceL); this.labels.push(switchLabel); this.enterScope(0); // Statements under must be grouped (by label) in SwitchCase // nodes. `cur` is used to keep the node that we are currently // adding statements to. 
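// For example, `switch (x) { case 1: a(); b(); default: c(); }` produces two
// SwitchCase nodes: the first with test `1` and a two-statement consequent, the
// second with test null for the default clause.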
var cur; for (var sawDefault = false; this.type !== types$1.braceR;) { if (this.type === types$1._case || this.type === types$1._default) { var isCase = this.type === types$1._case; if (cur) { this.finishNode(cur, "SwitchCase"); } node.cases.push(cur = this.startNode()); cur.consequent = []; this.next(); if (isCase) { cur.test = this.parseExpression(); } else { if (sawDefault) { this.raiseRecoverable(this.lastTokStart, "Multiple default clauses"); } sawDefault = true; cur.test = null; } this.expect(types$1.colon); } else { if (!cur) { this.unexpected(); } cur.consequent.push(this.parseStatement(null)); } } this.exitScope(); if (cur) { this.finishNode(cur, "SwitchCase"); } this.next(); // Closing brace this.labels.pop(); return this.finishNode(node, "SwitchStatement") }; pp$8.parseThrowStatement = function(node) { this.next(); if (lineBreak.test(this.input.slice(this.lastTokEnd, this.start))) { this.raise(this.lastTokEnd, "Illegal newline after throw"); } node.argument = this.parseExpression(); this.semicolon(); return this.finishNode(node, "ThrowStatement") }; // Reused empty array added for node fields that are always empty. var empty$1 = []; pp$8.parseCatchClauseParam = function() { var param = this.parseBindingAtom(); var simple = param.type === "Identifier"; this.enterScope(simple ? SCOPE_SIMPLE_CATCH : 0); this.checkLValPattern(param, simple ? BIND_SIMPLE_CATCH : BIND_LEXICAL); this.expect(types$1.parenR); return param }; pp$8.parseTryStatement = function(node) { this.next(); node.block = this.parseBlock(); node.handler = null; if (this.type === types$1._catch) { var clause = this.startNode(); this.next(); if (this.eat(types$1.parenL)) { clause.param = this.parseCatchClauseParam(); } else { if (this.options.ecmaVersion < 10) { this.unexpected(); } clause.param = null; this.enterScope(0); } clause.body = this.parseBlock(false); this.exitScope(); node.handler = this.finishNode(clause, "CatchClause"); } node.finalizer = this.eat(types$1._finally) ? this.parseBlock() : null; if (!node.handler && !node.finalizer) { this.raise(node.start, "Missing catch or finally clause"); } return this.finishNode(node, "TryStatement") }; pp$8.parseVarStatement = function(node, kind, allowMissingInitializer) { this.next(); this.parseVar(node, false, kind, allowMissingInitializer); this.semicolon(); return this.finishNode(node, "VariableDeclaration") }; pp$8.parseWhileStatement = function(node) { this.next(); node.test = this.parseParenExpression(); this.labels.push(loopLabel); node.body = this.parseStatement("while"); this.labels.pop(); return this.finishNode(node, "WhileStatement") }; pp$8.parseWithStatement = function(node) { if (this.strict) { this.raise(this.start, "'with' in strict mode"); } this.next(); node.object = this.parseParenExpression(); node.body = this.parseStatement("with"); return this.finishNode(node, "WithStatement") }; pp$8.parseEmptyStatement = function(node) { this.next(); return this.finishNode(node, "EmptyStatement") }; pp$8.parseLabeledStatement = function(node, maybeName, expr, context) { for (var i$1 = 0, list = this.labels; i$1 < list.length; i$1 += 1) { var label = list[i$1]; if (label.name === maybeName) { this.raise(expr.start, "Label '" + maybeName + "' is already declared"); } } var kind = this.type.isLoop ? "loop" : this.type === types$1._switch ? 
"switch" : null; for (var i = this.labels.length - 1; i >= 0; i--) { var label$1 = this.labels[i]; if (label$1.statementStart === node.start) { // Update information about previous labels on this node label$1.statementStart = this.start; label$1.kind = kind; } else { break } } this.labels.push({name: maybeName, kind: kind, statementStart: this.start}); node.body = this.parseStatement(context ? context.indexOf("label") === -1 ? context + "label" : context : "label"); this.labels.pop(); node.label = expr; return this.finishNode(node, "LabeledStatement") }; pp$8.parseExpressionStatement = function(node, expr) { node.expression = expr; this.semicolon(); return this.finishNode(node, "ExpressionStatement") }; // Parse a semicolon-enclosed block of statements, handling `"use // strict"` declarations when `allowStrict` is true (used for // function bodies). pp$8.parseBlock = function(createNewLexicalScope, node, exitStrict) { if ( createNewLexicalScope === void 0 ) createNewLexicalScope = true; if ( node === void 0 ) node = this.startNode(); node.body = []; this.expect(types$1.braceL); if (createNewLexicalScope) { this.enterScope(0); } while (this.type !== types$1.braceR) { var stmt = this.parseStatement(null); node.body.push(stmt); } if (exitStrict) { this.strict = false; } this.next(); if (createNewLexicalScope) { this.exitScope(); } return this.finishNode(node, "BlockStatement") }; // Parse a regular `for` loop. The disambiguation code in // `parseStatement` will already have parsed the init statement or // expression. pp$8.parseFor = function(node, init) { node.init = init; this.expect(types$1.semi); node.test = this.type === types$1.semi ? null : this.parseExpression(); this.expect(types$1.semi); node.update = this.type === types$1.parenR ? null : this.parseExpression(); this.expect(types$1.parenR); node.body = this.parseStatement("for"); this.exitScope(); this.labels.pop(); return this.finishNode(node, "ForStatement") }; // Parse a `for`/`in` and `for`/`of` loop, which are almost // same from parser's perspective. pp$8.parseForIn = function(node, init) { var isForIn = this.type === types$1._in; this.next(); if ( init.type === "VariableDeclaration" && init.declarations[0].init != null && ( !isForIn || this.options.ecmaVersion < 8 || this.strict || init.kind !== "var" || init.declarations[0].id.type !== "Identifier" ) ) { this.raise( init.start, ((isForIn ? "for-in" : "for-of") + " loop variable declaration may not have an initializer") ); } node.left = init; node.right = isForIn ? this.parseExpression() : this.parseMaybeAssign(); this.expect(types$1.parenR); node.body = this.parseStatement("for"); this.exitScope(); this.labels.pop(); return this.finishNode(node, isForIn ? "ForInStatement" : "ForOfStatement") }; // Parse a list of variable declarations. 
pp$8.parseVar = function(node, isFor, kind, allowMissingInitializer) { node.declarations = []; node.kind = kind; for (;;) { var decl = this.startNode(); this.parseVarId(decl, kind); if (this.eat(types$1.eq)) { decl.init = this.parseMaybeAssign(isFor); } else if (!allowMissingInitializer && kind === "const" && !(this.type === types$1._in || (this.options.ecmaVersion >= 6 && this.isContextual("of")))) { this.unexpected(); } else if (!allowMissingInitializer && decl.id.type !== "Identifier" && !(isFor && (this.type === types$1._in || this.isContextual("of")))) { this.raise(this.lastTokEnd, "Complex binding patterns require an initialization value"); } else { decl.init = null; } node.declarations.push(this.finishNode(decl, "VariableDeclarator")); if (!this.eat(types$1.comma)) { break } } return node }; pp$8.parseVarId = function(decl, kind) { decl.id = this.parseBindingAtom(); this.checkLValPattern(decl.id, kind === "var" ? BIND_VAR : BIND_LEXICAL, false); }; var FUNC_STATEMENT = 1, FUNC_HANGING_STATEMENT = 2, FUNC_NULLABLE_ID = 4; // Parse a function declaration or literal (depending on the // `statement & FUNC_STATEMENT`). // Remove `allowExpressionBody` for 7.0.0, as it is only called with false pp$8.parseFunction = function(node, statement, allowExpressionBody, isAsync, forInit) { this.initFunction(node); if (this.options.ecmaVersion >= 9 || this.options.ecmaVersion >= 6 && !isAsync) { if (this.type === types$1.star && (statement & FUNC_HANGING_STATEMENT)) { this.unexpected(); } node.generator = this.eat(types$1.star); } if (this.options.ecmaVersion >= 8) { node.async = !!isAsync; } if (statement & FUNC_STATEMENT) { node.id = (statement & FUNC_NULLABLE_ID) && this.type !== types$1.name ? null : this.parseIdent(); if (node.id && !(statement & FUNC_HANGING_STATEMENT)) // If it is a regular function declaration in sloppy mode, then it is // subject to Annex B semantics (BIND_FUNCTION). Otherwise, the binding // mode depends on properties of the current scope (see // treatFunctionsAsVar). { this.checkLValSimple(node.id, (this.strict || node.generator || node.async) ? this.treatFunctionsAsVar ? BIND_VAR : BIND_LEXICAL : BIND_FUNCTION); } } var oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldAwaitIdentPos = this.awaitIdentPos; this.yieldPos = 0; this.awaitPos = 0; this.awaitIdentPos = 0; this.enterScope(functionFlags(node.async, node.generator)); if (!(statement & FUNC_STATEMENT)) { node.id = this.type === types$1.name ? this.parseIdent() : null; } this.parseFunctionParams(node); this.parseFunctionBody(node, allowExpressionBody, false, forInit); this.yieldPos = oldYieldPos; this.awaitPos = oldAwaitPos; this.awaitIdentPos = oldAwaitIdentPos; return this.finishNode(node, (statement & FUNC_STATEMENT) ? "FunctionDeclaration" : "FunctionExpression") }; pp$8.parseFunctionParams = function(node) { this.expect(types$1.parenL); node.params = this.parseBindingList(types$1.parenR, false, this.options.ecmaVersion >= 8); this.checkYieldAwaitInDefaultParams(); }; // Parse a class declaration or literal (depending on the // `isStatement` parameter). pp$8.parseClass = function(node, isStatement) { this.next(); // ecma-262 14.6 Class Definitions // A class definition is always strict mode code. 
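// For example, because `strict` is forced on below for the whole class body,
// `class A { m() { with (o) {} } }` is rejected with "'with' in strict mode" even
// when the surrounding script is sloppy.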
var oldStrict = this.strict; this.strict = true; this.parseClassId(node, isStatement); this.parseClassSuper(node); var privateNameMap = this.enterClassBody(); var classBody = this.startNode(); var hadConstructor = false; classBody.body = []; this.expect(types$1.braceL); while (this.type !== types$1.braceR) { var element = this.parseClassElement(node.superClass !== null); if (element) { classBody.body.push(element); if (element.type === "MethodDefinition" && element.kind === "constructor") { if (hadConstructor) { this.raiseRecoverable(element.start, "Duplicate constructor in the same class"); } hadConstructor = true; } else if (element.key && element.key.type === "PrivateIdentifier" && isPrivateNameConflicted(privateNameMap, element)) { this.raiseRecoverable(element.key.start, ("Identifier '#" + (element.key.name) + "' has already been declared")); } } } this.strict = oldStrict; this.next(); node.body = this.finishNode(classBody, "ClassBody"); this.exitClassBody(); return this.finishNode(node, isStatement ? "ClassDeclaration" : "ClassExpression") }; pp$8.parseClassElement = function(constructorAllowsSuper) { if (this.eat(types$1.semi)) { return null } var ecmaVersion = this.options.ecmaVersion; var node = this.startNode(); var keyName = ""; var isGenerator = false; var isAsync = false; var kind = "method"; var isStatic = false; if (this.eatContextual("static")) { // Parse static init block if (ecmaVersion >= 13 && this.eat(types$1.braceL)) { this.parseClassStaticBlock(node); return node } if (this.isClassElementNameStart() || this.type === types$1.star) { isStatic = true; } else { keyName = "static"; } } node.static = isStatic; if (!keyName && ecmaVersion >= 8 && this.eatContextual("async")) { if ((this.isClassElementNameStart() || this.type === types$1.star) && !this.canInsertSemicolon()) { isAsync = true; } else { keyName = "async"; } } if (!keyName && (ecmaVersion >= 9 || !isAsync) && this.eat(types$1.star)) { isGenerator = true; } if (!keyName && !isAsync && !isGenerator) { var lastValue = this.value; if (this.eatContextual("get") || this.eatContextual("set")) { if (this.isClassElementNameStart()) { kind = lastValue; } else { keyName = lastValue; } } } // Parse element name if (keyName) { // 'async', 'get', 'set', or 'static' were not a keyword contextually. // The last token is any of those. Make it the element name. node.computed = false; node.key = this.startNodeAt(this.lastTokStart, this.lastTokStartLoc); node.key.name = keyName; this.finishNode(node.key, "Identifier"); } else { this.parseClassElementName(node); } // Parse element value if (ecmaVersion < 13 || this.type === types$1.parenL || kind !== "method" || isGenerator || isAsync) { var isConstructor = !node.static && checkKeyName(node, "constructor"); var allowsDirectSuper = isConstructor && constructorAllowsSuper; // Couldn't move this check into the 'parseClassMethod' method for backward compatibility. if (isConstructor && kind !== "method") { this.raise(node.key.start, "Constructor can't have get/set modifier"); } node.kind = isConstructor ? 
"constructor" : kind; this.parseClassMethod(node, isGenerator, isAsync, allowsDirectSuper); } else { this.parseClassField(node); } return node }; pp$8.isClassElementNameStart = function() { return ( this.type === types$1.name || this.type === types$1.privateId || this.type === types$1.num || this.type === types$1.string || this.type === types$1.bracketL || this.type.keyword ) }; pp$8.parseClassElementName = function(element) { if (this.type === types$1.privateId) { if (this.value === "constructor") { this.raise(this.start, "Classes can't have an element named '#constructor'"); } element.computed = false; element.key = this.parsePrivateIdent(); } else { this.parsePropertyName(element); } }; pp$8.parseClassMethod = function(method, isGenerator, isAsync, allowsDirectSuper) { // Check key and flags var key = method.key; if (method.kind === "constructor") { if (isGenerator) { this.raise(key.start, "Constructor can't be a generator"); } if (isAsync) { this.raise(key.start, "Constructor can't be an async method"); } } else if (method.static && checkKeyName(method, "prototype")) { this.raise(key.start, "Classes may not have a static property named prototype"); } // Parse value var value = method.value = this.parseMethod(isGenerator, isAsync, allowsDirectSuper); // Check value if (method.kind === "get" && value.params.length !== 0) { this.raiseRecoverable(value.start, "getter should have no params"); } if (method.kind === "set" && value.params.length !== 1) { this.raiseRecoverable(value.start, "setter should have exactly one param"); } if (method.kind === "set" && value.params[0].type === "RestElement") { this.raiseRecoverable(value.params[0].start, "Setter cannot use rest params"); } return this.finishNode(method, "MethodDefinition") }; pp$8.parseClassField = function(field) { if (checkKeyName(field, "constructor")) { this.raise(field.key.start, "Classes can't have a field named 'constructor'"); } else if (field.static && checkKeyName(field, "prototype")) { this.raise(field.key.start, "Classes can't have a static field named 'prototype'"); } if (this.eat(types$1.eq)) { // To raise SyntaxError if 'arguments' exists in the initializer. var scope = this.currentThisScope(); var inClassFieldInit = scope.inClassFieldInit; scope.inClassFieldInit = true; field.value = this.parseMaybeAssign(); scope.inClassFieldInit = inClassFieldInit; } else { field.value = null; } this.semicolon(); return this.finishNode(field, "PropertyDefinition") }; pp$8.parseClassStaticBlock = function(node) { node.body = []; var oldLabels = this.labels; this.labels = []; this.enterScope(SCOPE_CLASS_STATIC_BLOCK | SCOPE_SUPER); while (this.type !== types$1.braceR) { var stmt = this.parseStatement(null); node.body.push(stmt); } this.next(); this.exitScope(); this.labels = oldLabels; return this.finishNode(node, "StaticBlock") }; pp$8.parseClassId = function(node, isStatement) { if (this.type === types$1.name) { node.id = this.parseIdent(); if (isStatement) { this.checkLValSimple(node.id, BIND_LEXICAL, false); } } else { if (isStatement === true) { this.unexpected(); } node.id = null; } }; pp$8.parseClassSuper = function(node) { node.superClass = this.eat(types$1._extends) ? 
this.parseExprSubscripts(null, false) : null; }; pp$8.enterClassBody = function() { var element = {declared: Object.create(null), used: []}; this.privateNameStack.push(element); return element.declared }; pp$8.exitClassBody = function() { var ref = this.privateNameStack.pop(); var declared = ref.declared; var used = ref.used; if (!this.options.checkPrivateFields) { return } var len = this.privateNameStack.length; var parent = len === 0 ? null : this.privateNameStack[len - 1]; for (var i = 0; i < used.length; ++i) { var id = used[i]; if (!hasOwn(declared, id.name)) { if (parent) { parent.used.push(id); } else { this.raiseRecoverable(id.start, ("Private field '#" + (id.name) + "' must be declared in an enclosing class")); } } } }; function isPrivateNameConflicted(privateNameMap, element) { var name = element.key.name; var curr = privateNameMap[name]; var next = "true"; if (element.type === "MethodDefinition" && (element.kind === "get" || element.kind === "set")) { next = (element.static ? "s" : "i") + element.kind; } // `class { get #a(){}; static set #a(_){} }` is also conflict. if ( curr === "iget" && next === "iset" || curr === "iset" && next === "iget" || curr === "sget" && next === "sset" || curr === "sset" && next === "sget" ) { privateNameMap[name] = "true"; return false } else if (!curr) { privateNameMap[name] = next; return false } else { return true } } function checkKeyName(node, name) { var computed = node.computed; var key = node.key; return !computed && ( key.type === "Identifier" && key.name === name || key.type === "Literal" && key.value === name ) } // Parses module export declaration. pp$8.parseExportAllDeclaration = function(node, exports) { if (this.options.ecmaVersion >= 11) { if (this.eatContextual("as")) { node.exported = this.parseModuleExportName(); this.checkExport(exports, node.exported, this.lastTokStart); } else { node.exported = null; } } this.expectContextual("from"); if (this.type !== types$1.string) { this.unexpected(); } node.source = this.parseExprAtom(); this.semicolon(); return this.finishNode(node, "ExportAllDeclaration") }; pp$8.parseExport = function(node, exports) { this.next(); // export * from '...' if (this.eat(types$1.star)) { return this.parseExportAllDeclaration(node, exports) } if (this.eat(types$1._default)) { // export default ... this.checkExport(exports, "default", this.lastTokStart); node.declaration = this.parseExportDefaultDeclaration(); return this.finishNode(node, "ExportDefaultDeclaration") } // export var|const|let|function|class ... 
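// Note (illustrative): named exports are also checked for duplicates via checkExport,
// so a module containing `export var x; export { x };` is reported as
// "Duplicate export 'x'".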
if (this.shouldParseExportStatement()) { node.declaration = this.parseExportDeclaration(node); if (node.declaration.type === "VariableDeclaration") { this.checkVariableExport(exports, node.declaration.declarations); } else { this.checkExport(exports, node.declaration.id, node.declaration.id.start); } node.specifiers = []; node.source = null; } else { // export { x, y as z } [from '...'] node.declaration = null; node.specifiers = this.parseExportSpecifiers(exports); if (this.eatContextual("from")) { if (this.type !== types$1.string) { this.unexpected(); } node.source = this.parseExprAtom(); } else { for (var i = 0, list = node.specifiers; i < list.length; i += 1) { // check for keywords used as local names var spec = list[i]; this.checkUnreserved(spec.local); // check if export is defined this.checkLocalExport(spec.local); if (spec.local.type === "Literal") { this.raise(spec.local.start, "A string literal cannot be used as an exported binding without `from`."); } } node.source = null; } this.semicolon(); } return this.finishNode(node, "ExportNamedDeclaration") }; pp$8.parseExportDeclaration = function(node) { return this.parseStatement(null) }; pp$8.parseExportDefaultDeclaration = function() { var isAsync; if (this.type === types$1._function || (isAsync = this.isAsyncFunction())) { var fNode = this.startNode(); this.next(); if (isAsync) { this.next(); } return this.parseFunction(fNode, FUNC_STATEMENT | FUNC_NULLABLE_ID, false, isAsync) } else if (this.type === types$1._class) { var cNode = this.startNode(); return this.parseClass(cNode, "nullableID") } else { var declaration = this.parseMaybeAssign(); this.semicolon(); return declaration } }; pp$8.checkExport = function(exports, name, pos) { if (!exports) { return } if (typeof name !== "string") { name = name.type === "Identifier" ? name.name : name.value; } if (hasOwn(exports, name)) { this.raiseRecoverable(pos, "Duplicate export '" + name + "'"); } exports[name] = true; }; pp$8.checkPatternExport = function(exports, pat) { var type = pat.type; if (type === "Identifier") { this.checkExport(exports, pat, pat.start); } else if (type === "ObjectPattern") { for (var i = 0, list = pat.properties; i < list.length; i += 1) { var prop = list[i]; this.checkPatternExport(exports, prop); } } else if (type === "ArrayPattern") { for (var i$1 = 0, list$1 = pat.elements; i$1 < list$1.length; i$1 += 1) { var elt = list$1[i$1]; if (elt) { this.checkPatternExport(exports, elt); } } } else if (type === "Property") { this.checkPatternExport(exports, pat.value); } else if (type === "AssignmentPattern") { this.checkPatternExport(exports, pat.left); } else if (type === "RestElement") { this.checkPatternExport(exports, pat.argument); } }; pp$8.checkVariableExport = function(exports, decls) { if (!exports) { return } for (var i = 0, list = decls; i < list.length; i += 1) { var decl = list[i]; this.checkPatternExport(exports, decl.id); } }; pp$8.shouldParseExportStatement = function() { return this.type.keyword === "var" || this.type.keyword === "const" || this.type.keyword === "class" || this.type.keyword === "function" || this.isLet() || this.isAsyncFunction() }; // Parses a comma-separated list of module exports. pp$8.parseExportSpecifier = function(exports) { var node = this.startNode(); node.local = this.parseModuleExportName(); node.exported = this.eatContextual("as") ? 
this.parseModuleExportName() : node.local; this.checkExport( exports, node.exported, node.exported.start ); return this.finishNode(node, "ExportSpecifier") }; pp$8.parseExportSpecifiers = function(exports) { var nodes = [], first = true; // export { x, y as z } [from '...'] this.expect(types$1.braceL); while (!this.eat(types$1.braceR)) { if (!first) { this.expect(types$1.comma); if (this.afterTrailingComma(types$1.braceR)) { break } } else { first = false; } nodes.push(this.parseExportSpecifier(exports)); } return nodes }; // Parses import declaration. pp$8.parseImport = function(node) { this.next(); // import '...' if (this.type === types$1.string) { node.specifiers = empty$1; node.source = this.parseExprAtom(); } else { node.specifiers = this.parseImportSpecifiers(); this.expectContextual("from"); node.source = this.type === types$1.string ? this.parseExprAtom() : this.unexpected(); } this.semicolon(); return this.finishNode(node, "ImportDeclaration") }; // Parses a comma-separated list of module imports. pp$8.parseImportSpecifier = function() { var node = this.startNode(); node.imported = this.parseModuleExportName(); if (this.eatContextual("as")) { node.local = this.parseIdent(); } else { this.checkUnreserved(node.imported); node.local = node.imported; } this.checkLValSimple(node.local, BIND_LEXICAL); return this.finishNode(node, "ImportSpecifier") }; pp$8.parseImportDefaultSpecifier = function() { // import defaultObj, { x, y as z } from '...' var node = this.startNode(); node.local = this.parseIdent(); this.checkLValSimple(node.local, BIND_LEXICAL); return this.finishNode(node, "ImportDefaultSpecifier") }; pp$8.parseImportNamespaceSpecifier = function() { var node = this.startNode(); this.next(); this.expectContextual("as"); node.local = this.parseIdent(); this.checkLValSimple(node.local, BIND_LEXICAL); return this.finishNode(node, "ImportNamespaceSpecifier") }; pp$8.parseImportSpecifiers = function() { var nodes = [], first = true; if (this.type === types$1.name) { nodes.push(this.parseImportDefaultSpecifier()); if (!this.eat(types$1.comma)) { return nodes } } if (this.type === types$1.star) { nodes.push(this.parseImportNamespaceSpecifier()); return nodes } this.expect(types$1.braceL); while (!this.eat(types$1.braceR)) { if (!first) { this.expect(types$1.comma); if (this.afterTrailingComma(types$1.braceR)) { break } } else { first = false; } nodes.push(this.parseImportSpecifier()); } return nodes }; pp$8.parseModuleExportName = function() { if (this.options.ecmaVersion >= 13 && this.type === types$1.string) { var stringLiteral = this.parseLiteral(this.value); if (loneSurrogate.test(stringLiteral.value)) { this.raise(stringLiteral.start, "An export name cannot include a lone surrogate."); } return stringLiteral } return this.parseIdent(true) }; // Set `ExpressionStatement#directive` property for directive prologues. pp$8.adaptDirectivePrologue = function(statements) { for (var i = 0; i < statements.length && this.isDirectiveCandidate(statements[i]); ++i) { statements[i].directive = statements[i].expression.raw.slice(1, -1); } }; pp$8.isDirectiveCandidate = function(statement) { return ( this.options.ecmaVersion >= 5 && statement.type === "ExpressionStatement" && statement.expression.type === "Literal" && typeof statement.expression.value === "string" && // Reject parenthesized strings. (this.input[statement.start] === "\"" || this.input[statement.start] === "'") ) }; var pp$7 = Parser.prototype; // Convert existing expression atom to assignable pattern // if possible. 
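// Illustrative sketch (not part of acorn): an inert helper, never invoked by this
// bundle, showing the conversion performed by toAssignable below — a destructuring
// assignment target is first parsed as an expression and then rewritten in place into
// a pattern node. The helper name and ecmaVersion value are arbitrary choices.
function exampleToAssignableSketch() {
  var ast = Parser.parse("[a, b] = arr;", { ecmaVersion: 2020 });
  // The left-hand side began as an ArrayExpression and was converted in place.
  return ast.body[0].expression.left.type; // -> "ArrayPattern"
}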
pp$7.toAssignable = function(node, isBinding, refDestructuringErrors) { if (this.options.ecmaVersion >= 6 && node) { switch (node.type) { case "Identifier": if (this.inAsync && node.name === "await") { this.raise(node.start, "Cannot use 'await' as identifier inside an async function"); } break case "ObjectPattern": case "ArrayPattern": case "AssignmentPattern": case "RestElement": break case "ObjectExpression": node.type = "ObjectPattern"; if (refDestructuringErrors) { this.checkPatternErrors(refDestructuringErrors, true); } for (var i = 0, list = node.properties; i < list.length; i += 1) { var prop = list[i]; this.toAssignable(prop, isBinding); // Early error: // AssignmentRestProperty[Yield, Await] : // `...` DestructuringAssignmentTarget[Yield, Await] // // It is a Syntax Error if |DestructuringAssignmentTarget| is an |ArrayLiteral| or an |ObjectLiteral|. if ( prop.type === "RestElement" && (prop.argument.type === "ArrayPattern" || prop.argument.type === "ObjectPattern") ) { this.raise(prop.argument.start, "Unexpected token"); } } break case "Property": // AssignmentProperty has type === "Property" if (node.kind !== "init") { this.raise(node.key.start, "Object pattern can't contain getter or setter"); } this.toAssignable(node.value, isBinding); break case "ArrayExpression": node.type = "ArrayPattern"; if (refDestructuringErrors) { this.checkPatternErrors(refDestructuringErrors, true); } this.toAssignableList(node.elements, isBinding); break case "SpreadElement": node.type = "RestElement"; this.toAssignable(node.argument, isBinding); if (node.argument.type === "AssignmentPattern") { this.raise(node.argument.start, "Rest elements cannot have a default value"); } break case "AssignmentExpression": if (node.operator !== "=") { this.raise(node.left.end, "Only '=' operator can be used for specifying default value."); } node.type = "AssignmentPattern"; delete node.operator; this.toAssignable(node.left, isBinding); break case "ParenthesizedExpression": this.toAssignable(node.expression, isBinding, refDestructuringErrors); break case "ChainExpression": this.raiseRecoverable(node.start, "Optional chaining cannot appear in left-hand side"); break case "MemberExpression": if (!isBinding) { break } default: this.raise(node.start, "Assigning to rvalue"); } } else if (refDestructuringErrors) { this.checkPatternErrors(refDestructuringErrors, true); } return node }; // Convert list of expression atoms to binding list. pp$7.toAssignableList = function(exprList, isBinding) { var end = exprList.length; for (var i = 0; i < end; i++) { var elt = exprList[i]; if (elt) { this.toAssignable(elt, isBinding); } } if (end) { var last = exprList[end - 1]; if (this.options.ecmaVersion === 6 && isBinding && last && last.type === "RestElement" && last.argument.type !== "Identifier") { this.unexpected(last.argument.start); } } return exprList }; // Parses spread element. pp$7.parseSpread = function(refDestructuringErrors) { var node = this.startNode(); this.next(); node.argument = this.parseMaybeAssign(false, refDestructuringErrors); return this.finishNode(node, "SpreadElement") }; pp$7.parseRestBinding = function() { var node = this.startNode(); this.next(); // RestElement inside of a function parameter must be an identifier if (this.options.ecmaVersion === 6 && this.type !== types$1.name) { this.unexpected(); } node.argument = this.parseBindingAtom(); return this.finishNode(node, "RestElement") }; // Parses lvalue (assignable) atom. 
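// For example, in the binding lists handled below a rest element must come last —
// `function f(a, ...rest, b) {}` is reported with "Comma is not permitted after the
// rest element" — and for ecmaVersion 6 the rest argument must be a plain identifier.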
pp$7.parseBindingAtom = function() { if (this.options.ecmaVersion >= 6) { switch (this.type) { case types$1.bracketL: var node = this.startNode(); this.next(); node.elements = this.parseBindingList(types$1.bracketR, true, true); return this.finishNode(node, "ArrayPattern") case types$1.braceL: return this.parseObj(true) } } return this.parseIdent() }; pp$7.parseBindingList = function(close, allowEmpty, allowTrailingComma, allowModifiers) { var elts = [], first = true; while (!this.eat(close)) { if (first) { first = false; } else { this.expect(types$1.comma); } if (allowEmpty && this.type === types$1.comma) { elts.push(null); } else if (allowTrailingComma && this.afterTrailingComma(close)) { break } else if (this.type === types$1.ellipsis) { var rest = this.parseRestBinding(); this.parseBindingListItem(rest); elts.push(rest); if (this.type === types$1.comma) { this.raiseRecoverable(this.start, "Comma is not permitted after the rest element"); } this.expect(close); break } else { elts.push(this.parseAssignableListItem(allowModifiers)); } } return elts }; pp$7.parseAssignableListItem = function(allowModifiers) { var elem = this.parseMaybeDefault(this.start, this.startLoc); this.parseBindingListItem(elem); return elem }; pp$7.parseBindingListItem = function(param) { return param }; // Parses assignment pattern around given atom if possible. pp$7.parseMaybeDefault = function(startPos, startLoc, left) { left = left || this.parseBindingAtom(); if (this.options.ecmaVersion < 6 || !this.eat(types$1.eq)) { return left } var node = this.startNodeAt(startPos, startLoc); node.left = left; node.right = this.parseMaybeAssign(); return this.finishNode(node, "AssignmentPattern") }; // The following three functions all verify that a node is an lvalue — // something that can be bound, or assigned to. In order to do so, they perform // a variety of checks: // // - Check that none of the bound/assigned-to identifiers are reserved words. // - Record name declarations for bindings in the appropriate scope. // - Check duplicate argument names, if checkClashes is set. // // If a complex binding pattern is encountered (e.g., object and array // destructuring), the entire pattern is recursively checked. // // There are three versions of checkLVal*() appropriate for different // circumstances: // // - checkLValSimple() shall be used if the syntactic construct supports // nothing other than identifiers and member expressions. Parenthesized // expressions are also correctly handled. This is generally appropriate for // constructs for which the spec says // // > It is a Syntax Error if AssignmentTargetType of [the production] is not // > simple. // // It is also appropriate for checking if an identifier is valid and not // defined elsewhere, like import declarations or function/class identifiers. // // Examples where this is used include: // a += …; // import a from '…'; // where a is the node to be checked. // // - checkLValPattern() shall be used if the syntactic construct supports // anything checkLValSimple() supports, as well as object and array // destructuring patterns. This is generally appropriate for constructs for // which the spec says // // > It is a Syntax Error if [the production] is neither an ObjectLiteral nor // > an ArrayLiteral and AssignmentTargetType of [the production] is not // > simple. // // Examples where this is used include: // (a = …); // const a = …; // try { … } catch (a) { … } // where a is the node to be checked. 
// // - checkLValInnerPattern() shall be used if the syntactic construct supports // anything checkLValPattern() supports, as well as default assignment // patterns, rest elements, and other constructs that may appear within an // object or array destructuring pattern. // // As a special case, function parameters also use checkLValInnerPattern(), // as they also support defaults and rest constructs. // // These functions deliberately support both assignment and binding constructs, // as the logic for both is exceedingly similar. If the node is the target of // an assignment, then bindingType should be set to BIND_NONE. Otherwise, it // should be set to the appropriate BIND_* constant, like BIND_VAR or // BIND_LEXICAL. // // If the function is called with a non-BIND_NONE bindingType, then // additionally a checkClashes object may be specified to allow checking for // duplicate argument names. checkClashes is ignored if the provided construct // is an assignment (i.e., bindingType is BIND_NONE). pp$7.checkLValSimple = function(expr, bindingType, checkClashes) { if ( bindingType === void 0 ) bindingType = BIND_NONE; var isBind = bindingType !== BIND_NONE; switch (expr.type) { case "Identifier": if (this.strict && this.reservedWordsStrictBind.test(expr.name)) { this.raiseRecoverable(expr.start, (isBind ? "Binding " : "Assigning to ") + expr.name + " in strict mode"); } if (isBind) { if (bindingType === BIND_LEXICAL && expr.name === "let") { this.raiseRecoverable(expr.start, "let is disallowed as a lexically bound name"); } if (checkClashes) { if (hasOwn(checkClashes, expr.name)) { this.raiseRecoverable(expr.start, "Argument name clash"); } checkClashes[expr.name] = true; } if (bindingType !== BIND_OUTSIDE) { this.declareName(expr.name, bindingType, expr.start); } } break case "ChainExpression": this.raiseRecoverable(expr.start, "Optional chaining cannot appear in left-hand side"); break case "MemberExpression": if (isBind) { this.raiseRecoverable(expr.start, "Binding member expression"); } break case "ParenthesizedExpression": if (isBind) { this.raiseRecoverable(expr.start, "Binding parenthesized expression"); } return this.checkLValSimple(expr.expression, bindingType, checkClashes) default: this.raise(expr.start, (isBind ? 
"Binding" : "Assigning to") + " rvalue"); } }; pp$7.checkLValPattern = function(expr, bindingType, checkClashes) { if ( bindingType === void 0 ) bindingType = BIND_NONE; switch (expr.type) { case "ObjectPattern": for (var i = 0, list = expr.properties; i < list.length; i += 1) { var prop = list[i]; this.checkLValInnerPattern(prop, bindingType, checkClashes); } break case "ArrayPattern": for (var i$1 = 0, list$1 = expr.elements; i$1 < list$1.length; i$1 += 1) { var elem = list$1[i$1]; if (elem) { this.checkLValInnerPattern(elem, bindingType, checkClashes); } } break default: this.checkLValSimple(expr, bindingType, checkClashes); } }; pp$7.checkLValInnerPattern = function(expr, bindingType, checkClashes) { if ( bindingType === void 0 ) bindingType = BIND_NONE; switch (expr.type) { case "Property": // AssignmentProperty has type === "Property" this.checkLValInnerPattern(expr.value, bindingType, checkClashes); break case "AssignmentPattern": this.checkLValPattern(expr.left, bindingType, checkClashes); break case "RestElement": this.checkLValPattern(expr.argument, bindingType, checkClashes); break default: this.checkLValPattern(expr, bindingType, checkClashes); } }; // The algorithm used to determine whether a regexp can appear at a // given point in the program is loosely based on sweet.js' approach. // See https://github.com/mozilla/sweet.js/wiki/design var TokContext = function TokContext(token, isExpr, preserveSpace, override, generator) { this.token = token; this.isExpr = !!isExpr; this.preserveSpace = !!preserveSpace; this.override = override; this.generator = !!generator; }; var types = { b_stat: new TokContext("{", false), b_expr: new TokContext("{", true), b_tmpl: new TokContext("${", false), p_stat: new TokContext("(", false), p_expr: new TokContext("(", true), q_tmpl: new TokContext("`", true, true, function (p) { return p.tryReadTemplateToken(); }), f_stat: new TokContext("function", false), f_expr: new TokContext("function", true), f_expr_gen: new TokContext("function", true, false, null, true), f_gen: new TokContext("function", false, false, null, true) }; var pp$6 = Parser.prototype; pp$6.initialContext = function() { return [types.b_stat] }; pp$6.curContext = function() { return this.context[this.context.length - 1] }; pp$6.braceIsBlock = function(prevType) { var parent = this.curContext(); if (parent === types.f_expr || parent === types.f_stat) { return true } if (prevType === types$1.colon && (parent === types.b_stat || parent === types.b_expr)) { return !parent.isExpr } // The check for `tt.name && exprAllowed` detects whether we are // after a `yield` or `of` construct. See the `updateContext` for // `tt.name`. 
if (prevType === types$1._return || prevType === types$1.name && this.exprAllowed) { return lineBreak.test(this.input.slice(this.lastTokEnd, this.start)) } if (prevType === types$1._else || prevType === types$1.semi || prevType === types$1.eof || prevType === types$1.parenR || prevType === types$1.arrow) { return true } if (prevType === types$1.braceL) { return parent === types.b_stat } if (prevType === types$1._var || prevType === types$1._const || prevType === types$1.name) { return false } return !this.exprAllowed }; pp$6.inGeneratorContext = function() { for (var i = this.context.length - 1; i >= 1; i--) { var context = this.context[i]; if (context.token === "function") { return context.generator } } return false }; pp$6.updateContext = function(prevType) { var update, type = this.type; if (type.keyword && prevType === types$1.dot) { this.exprAllowed = false; } else if (update = type.updateContext) { update.call(this, prevType); } else { this.exprAllowed = type.beforeExpr; } }; // Used to handle edge cases when token context could not be inferred correctly during tokenization phase pp$6.overrideContext = function(tokenCtx) { if (this.curContext() !== tokenCtx) { this.context[this.context.length - 1] = tokenCtx; } }; // Token-specific context update code types$1.parenR.updateContext = types$1.braceR.updateContext = function() { if (this.context.length === 1) { this.exprAllowed = true; return } var out = this.context.pop(); if (out === types.b_stat && this.curContext().token === "function") { out = this.context.pop(); } this.exprAllowed = !out.isExpr; }; types$1.braceL.updateContext = function(prevType) { this.context.push(this.braceIsBlock(prevType) ? types.b_stat : types.b_expr); this.exprAllowed = true; }; types$1.dollarBraceL.updateContext = function() { this.context.push(types.b_tmpl); this.exprAllowed = true; }; types$1.parenL.updateContext = function(prevType) { var statementParens = prevType === types$1._if || prevType === types$1._for || prevType === types$1._with || prevType === types$1._while; this.context.push(statementParens ? 
types.p_stat : types.p_expr); this.exprAllowed = true; }; types$1.incDec.updateContext = function() { // tokExprAllowed stays unchanged }; types$1._function.updateContext = types$1._class.updateContext = function(prevType) { if (prevType.beforeExpr && prevType !== types$1._else && !(prevType === types$1.semi && this.curContext() !== types.p_stat) && !(prevType === types$1._return && lineBreak.test(this.input.slice(this.lastTokEnd, this.start))) && !((prevType === types$1.colon || prevType === types$1.braceL) && this.curContext() === types.b_stat)) { this.context.push(types.f_expr); } else { this.context.push(types.f_stat); } this.exprAllowed = false; }; types$1.colon.updateContext = function() { if (this.curContext().token === "function") { this.context.pop(); } this.exprAllowed = true; }; types$1.backQuote.updateContext = function() { if (this.curContext() === types.q_tmpl) { this.context.pop(); } else { this.context.push(types.q_tmpl); } this.exprAllowed = false; }; types$1.star.updateContext = function(prevType) { if (prevType === types$1._function) { var index = this.context.length - 1; if (this.context[index] === types.f_expr) { this.context[index] = types.f_expr_gen; } else { this.context[index] = types.f_gen; } } this.exprAllowed = true; }; types$1.name.updateContext = function(prevType) { var allowed = false; if (this.options.ecmaVersion >= 6 && prevType !== types$1.dot) { if (this.value === "of" && !this.exprAllowed || this.value === "yield" && this.inGeneratorContext()) { allowed = true; } } this.exprAllowed = allowed; }; // A recursive descent parser operates by defining functions for all // syntactic elements, and recursively calling those, each function // advancing the input stream and returning an AST node. Precedence // of constructs (for example, the fact that `!x[1]` means `!(x[1])` // instead of `(!x)[1]` is handled by the fact that the parser // function that parses unary prefix operators is called first, and // in turn calls the function that parses `[]` subscripts — that // way, it'll receive the node for `x[1]` already parsed, and wraps // *that* in the unary operator node. // // Acorn uses an [operator precedence parser][opp] to handle binary // operator precedence, because it is much more compact than using // the technique outlined above, which uses different, nesting // functions to specify precedence, for all of the ten binary // precedence levels that JavaScript defines. // // [opp]: http://en.wikipedia.org/wiki/Operator-precedence_parser var pp$5 = Parser.prototype; // Check if property name clashes with already added. // Object/class getters and setters are not allowed to clash — // either with each other or with an init property — and in // strict mode, init properties are also not allowed to be repeated. 
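// Illustrative aside on the operator-precedence note above. The helper below is
// a hypothetical, self-contained sketch of precedence climbing over an already
// tokenized input; it is defined for illustration only and is never invoked by
// this bundle.
function _exampleBinaryPrecedenceSketch(tokens) {
  // tokens: e.g. [1, "+", 2, "*", 3]; numbers are operands, strings are operators.
  var binop = {"+": 9, "-": 9, "*": 10, "/": 10}; // higher numbers bind tighter
  var pos = 0;
  function parseAtom() { return tokens[pos++] }
  function parseExprOp(left, minPrec) {
    var prec = binop[tokens[pos]];
    if (prec != null && prec > minPrec) {
      var op = tokens[pos++];
      // The right operand is parsed with `prec` as the new floor, so only
      // tighter-binding operators get folded into the right-hand side.
      var right = parseExprOp(parseAtom(), prec);
      var node = {type: "BinaryExpression", operator: op, left: left, right: right};
      return parseExprOp(node, minPrec)
    }
    return left
  }
  return parseExprOp(parseAtom(), -1)
}
// _exampleBinaryPrecedenceSketch([1, "+", 2, "*", 3]) groups as 1 + (2 * 3):
// {operator: "+", left: 1, right: {operator: "*", left: 2, right: 3}, ...}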
pp$5.checkPropClash = function(prop, propHash, refDestructuringErrors) { if (this.options.ecmaVersion >= 9 && prop.type === "SpreadElement") { return } if (this.options.ecmaVersion >= 6 && (prop.computed || prop.method || prop.shorthand)) { return } var key = prop.key; var name; switch (key.type) { case "Identifier": name = key.name; break case "Literal": name = String(key.value); break default: return } var kind = prop.kind; if (this.options.ecmaVersion >= 6) { if (name === "__proto__" && kind === "init") { if (propHash.proto) { if (refDestructuringErrors) { if (refDestructuringErrors.doubleProto < 0) { refDestructuringErrors.doubleProto = key.start; } } else { this.raiseRecoverable(key.start, "Redefinition of __proto__ property"); } } propHash.proto = true; } return } name = "$" + name; var other = propHash[name]; if (other) { var redefinition; if (kind === "init") { redefinition = this.strict && other.init || other.get || other.set; } else { redefinition = other.init || other[kind]; } if (redefinition) { this.raiseRecoverable(key.start, "Redefinition of property"); } } else { other = propHash[name] = { init: false, get: false, set: false }; } other[kind] = true; }; // ### Expression parsing // These nest, from the most general expression type at the top to // 'atomic', nondivisible expression types at the bottom. Most of // the functions will simply let the function(s) below them parse, // and, *if* the syntactic construct they handle is present, wrap // the AST node that the inner parser gave them in another node. // Parse a full expression. The optional arguments are used to // forbid the `in` operator (in for loops initalization expressions) // and provide reference for storing '=' operator inside shorthand // property assignment in contexts where both object expression // and object pattern might appear (so it's possible to raise // delayed syntax error at correct position). pp$5.parseExpression = function(forInit, refDestructuringErrors) { var startPos = this.start, startLoc = this.startLoc; var expr = this.parseMaybeAssign(forInit, refDestructuringErrors); if (this.type === types$1.comma) { var node = this.startNodeAt(startPos, startLoc); node.expressions = [expr]; while (this.eat(types$1.comma)) { node.expressions.push(this.parseMaybeAssign(forInit, refDestructuringErrors)); } return this.finishNode(node, "SequenceExpression") } return expr }; // Parse an assignment expression. This includes applications of // operators like `+=`. 
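// Illustrative aside on the delayed-error bookkeeping used below (hypothetical
// sources, not part of this bundle): when `{a = 1}` is first seen, the parser
// cannot yet tell an object pattern from an object expression:
//   ({a = 1} = obj);   // valid, shorthand default inside a destructuring pattern
//   ({a = 1});         // invalid, `=` is not allowed in an object expression
// So the position of the shorthand `=` is only recorded in DestructuringErrors,
// and the error is raised later if no enclosing assignment target turns up.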
pp$5.parseMaybeAssign = function(forInit, refDestructuringErrors, afterLeftParse) { if (this.isContextual("yield")) { if (this.inGenerator) { return this.parseYield(forInit) } // The tokenizer will assume an expression is allowed after // `yield`, but this isn't that kind of yield else { this.exprAllowed = false; } } var ownDestructuringErrors = false, oldParenAssign = -1, oldTrailingComma = -1, oldDoubleProto = -1; if (refDestructuringErrors) { oldParenAssign = refDestructuringErrors.parenthesizedAssign; oldTrailingComma = refDestructuringErrors.trailingComma; oldDoubleProto = refDestructuringErrors.doubleProto; refDestructuringErrors.parenthesizedAssign = refDestructuringErrors.trailingComma = -1; } else { refDestructuringErrors = new DestructuringErrors; ownDestructuringErrors = true; } var startPos = this.start, startLoc = this.startLoc; if (this.type === types$1.parenL || this.type === types$1.name) { this.potentialArrowAt = this.start; this.potentialArrowInForAwait = forInit === "await"; } var left = this.parseMaybeConditional(forInit, refDestructuringErrors); if (afterLeftParse) { left = afterLeftParse.call(this, left, startPos, startLoc); } if (this.type.isAssign) { var node = this.startNodeAt(startPos, startLoc); node.operator = this.value; if (this.type === types$1.eq) { left = this.toAssignable(left, false, refDestructuringErrors); } if (!ownDestructuringErrors) { refDestructuringErrors.parenthesizedAssign = refDestructuringErrors.trailingComma = refDestructuringErrors.doubleProto = -1; } if (refDestructuringErrors.shorthandAssign >= left.start) { refDestructuringErrors.shorthandAssign = -1; } // reset because shorthand default was used correctly if (this.type === types$1.eq) { this.checkLValPattern(left); } else { this.checkLValSimple(left); } node.left = left; this.next(); node.right = this.parseMaybeAssign(forInit); if (oldDoubleProto > -1) { refDestructuringErrors.doubleProto = oldDoubleProto; } return this.finishNode(node, "AssignmentExpression") } else { if (ownDestructuringErrors) { this.checkExpressionErrors(refDestructuringErrors, true); } } if (oldParenAssign > -1) { refDestructuringErrors.parenthesizedAssign = oldParenAssign; } if (oldTrailingComma > -1) { refDestructuringErrors.trailingComma = oldTrailingComma; } return left }; // Parse a ternary conditional (`?:`) operator. pp$5.parseMaybeConditional = function(forInit, refDestructuringErrors) { var startPos = this.start, startLoc = this.startLoc; var expr = this.parseExprOps(forInit, refDestructuringErrors); if (this.checkExpressionErrors(refDestructuringErrors)) { return expr } if (this.eat(types$1.question)) { var node = this.startNodeAt(startPos, startLoc); node.test = expr; node.consequent = this.parseMaybeAssign(); this.expect(types$1.colon); node.alternate = this.parseMaybeAssign(forInit); return this.finishNode(node, "ConditionalExpression") } return expr }; // Start the precedence parser. pp$5.parseExprOps = function(forInit, refDestructuringErrors) { var startPos = this.start, startLoc = this.startLoc; var expr = this.parseMaybeUnary(refDestructuringErrors, false, false, forInit); if (this.checkExpressionErrors(refDestructuringErrors)) { return expr } return expr.start === startPos && expr.type === "ArrowFunctionExpression" ? expr : this.parseExprOp(expr, startPos, startLoc, -1, forInit) }; // Parse binary operators with the operator precedence parsing // algorithm. `left` is the left-hand side of the operator. 
// `minPrec` provides context that allows the function to stop and // defer further parser to one of its callers when it encounters an // operator that has a lower precedence than the set it is parsing. pp$5.parseExprOp = function(left, leftStartPos, leftStartLoc, minPrec, forInit) { var prec = this.type.binop; if (prec != null && (!forInit || this.type !== types$1._in)) { if (prec > minPrec) { var logical = this.type === types$1.logicalOR || this.type === types$1.logicalAND; var coalesce = this.type === types$1.coalesce; if (coalesce) { // Handle the precedence of `tt.coalesce` as equal to the range of logical expressions. // In other words, `node.right` shouldn't contain logical expressions in order to check the mixed error. prec = types$1.logicalAND.binop; } var op = this.value; this.next(); var startPos = this.start, startLoc = this.startLoc; var right = this.parseExprOp(this.parseMaybeUnary(null, false, false, forInit), startPos, startLoc, prec, forInit); var node = this.buildBinary(leftStartPos, leftStartLoc, left, right, op, logical || coalesce); if ((logical && this.type === types$1.coalesce) || (coalesce && (this.type === types$1.logicalOR || this.type === types$1.logicalAND))) { this.raiseRecoverable(this.start, "Logical expressions and coalesce expressions cannot be mixed. Wrap either by parentheses"); } return this.parseExprOp(node, leftStartPos, leftStartLoc, minPrec, forInit) } } return left }; pp$5.buildBinary = function(startPos, startLoc, left, right, op, logical) { if (right.type === "PrivateIdentifier") { this.raise(right.start, "Private identifier can only be left side of binary expression"); } var node = this.startNodeAt(startPos, startLoc); node.left = left; node.operator = op; node.right = right; return this.finishNode(node, logical ? "LogicalExpression" : "BinaryExpression") }; // Parse unary operators, both prefix and postfix. pp$5.parseMaybeUnary = function(refDestructuringErrors, sawUnary, incDec, forInit) { var startPos = this.start, startLoc = this.startLoc, expr; if (this.isContextual("await") && this.canAwait) { expr = this.parseAwait(forInit); sawUnary = true; } else if (this.type.prefix) { var node = this.startNode(), update = this.type === types$1.incDec; node.operator = this.value; node.prefix = true; this.next(); node.argument = this.parseMaybeUnary(null, true, update, forInit); this.checkExpressionErrors(refDestructuringErrors, true); if (update) { this.checkLValSimple(node.argument); } else if (this.strict && node.operator === "delete" && isLocalVariableAccess(node.argument)) { this.raiseRecoverable(node.start, "Deleting local variable in strict mode"); } else if (node.operator === "delete" && isPrivateFieldAccess(node.argument)) { this.raiseRecoverable(node.start, "Private fields can not be deleted"); } else { sawUnary = true; } expr = this.finishNode(node, update ? 
"UpdateExpression" : "UnaryExpression"); } else if (!sawUnary && this.type === types$1.privateId) { if ((forInit || this.privateNameStack.length === 0) && this.options.checkPrivateFields) { this.unexpected(); } expr = this.parsePrivateIdent(); // only could be private fields in 'in', such as #x in obj if (this.type !== types$1._in) { this.unexpected(); } } else { expr = this.parseExprSubscripts(refDestructuringErrors, forInit); if (this.checkExpressionErrors(refDestructuringErrors)) { return expr } while (this.type.postfix && !this.canInsertSemicolon()) { var node$1 = this.startNodeAt(startPos, startLoc); node$1.operator = this.value; node$1.prefix = false; node$1.argument = expr; this.checkLValSimple(expr); this.next(); expr = this.finishNode(node$1, "UpdateExpression"); } } if (!incDec && this.eat(types$1.starstar)) { if (sawUnary) { this.unexpected(this.lastTokStart); } else { return this.buildBinary(startPos, startLoc, expr, this.parseMaybeUnary(null, false, false, forInit), "**", false) } } else { return expr } }; function isLocalVariableAccess(node) { return ( node.type === "Identifier" || node.type === "ParenthesizedExpression" && isLocalVariableAccess(node.expression) ) } function isPrivateFieldAccess(node) { return ( node.type === "MemberExpression" && node.property.type === "PrivateIdentifier" || node.type === "ChainExpression" && isPrivateFieldAccess(node.expression) || node.type === "ParenthesizedExpression" && isPrivateFieldAccess(node.expression) ) } // Parse call, dot, and `[]`-subscript expressions. pp$5.parseExprSubscripts = function(refDestructuringErrors, forInit) { var startPos = this.start, startLoc = this.startLoc; var expr = this.parseExprAtom(refDestructuringErrors, forInit); if (expr.type === "ArrowFunctionExpression" && this.input.slice(this.lastTokStart, this.lastTokEnd) !== ")") { return expr } var result = this.parseSubscripts(expr, startPos, startLoc, false, forInit); if (refDestructuringErrors && result.type === "MemberExpression") { if (refDestructuringErrors.parenthesizedAssign >= result.start) { refDestructuringErrors.parenthesizedAssign = -1; } if (refDestructuringErrors.parenthesizedBind >= result.start) { refDestructuringErrors.parenthesizedBind = -1; } if (refDestructuringErrors.trailingComma >= result.start) { refDestructuringErrors.trailingComma = -1; } } return result }; pp$5.parseSubscripts = function(base, startPos, startLoc, noCalls, forInit) { var maybeAsyncArrow = this.options.ecmaVersion >= 8 && base.type === "Identifier" && base.name === "async" && this.lastTokEnd === base.end && !this.canInsertSemicolon() && base.end - base.start === 5 && this.potentialArrowAt === base.start; var optionalChained = false; while (true) { var element = this.parseSubscript(base, startPos, startLoc, noCalls, maybeAsyncArrow, optionalChained, forInit); if (element.optional) { optionalChained = true; } if (element === base || element.type === "ArrowFunctionExpression") { if (optionalChained) { var chainNode = this.startNodeAt(startPos, startLoc); chainNode.expression = element; element = this.finishNode(chainNode, "ChainExpression"); } return element } base = element; } }; pp$5.shouldParseAsyncArrow = function() { return !this.canInsertSemicolon() && this.eat(types$1.arrow) }; pp$5.parseSubscriptAsyncArrow = function(startPos, startLoc, exprList, forInit) { return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), exprList, true, forInit) }; pp$5.parseSubscript = function(base, startPos, startLoc, noCalls, maybeAsyncArrow, optionalChained, forInit) { 
var optionalSupported = this.options.ecmaVersion >= 11; var optional = optionalSupported && this.eat(types$1.questionDot); if (noCalls && optional) { this.raise(this.lastTokStart, "Optional chaining cannot appear in the callee of new expressions"); } var computed = this.eat(types$1.bracketL); if (computed || (optional && this.type !== types$1.parenL && this.type !== types$1.backQuote) || this.eat(types$1.dot)) { var node = this.startNodeAt(startPos, startLoc); node.object = base; if (computed) { node.property = this.parseExpression(); this.expect(types$1.bracketR); } else if (this.type === types$1.privateId && base.type !== "Super") { node.property = this.parsePrivateIdent(); } else { node.property = this.parseIdent(this.options.allowReserved !== "never"); } node.computed = !!computed; if (optionalSupported) { node.optional = optional; } base = this.finishNode(node, "MemberExpression"); } else if (!noCalls && this.eat(types$1.parenL)) { var refDestructuringErrors = new DestructuringErrors, oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldAwaitIdentPos = this.awaitIdentPos; this.yieldPos = 0; this.awaitPos = 0; this.awaitIdentPos = 0; var exprList = this.parseExprList(types$1.parenR, this.options.ecmaVersion >= 8, false, refDestructuringErrors); if (maybeAsyncArrow && !optional && this.shouldParseAsyncArrow()) { this.checkPatternErrors(refDestructuringErrors, false); this.checkYieldAwaitInDefaultParams(); if (this.awaitIdentPos > 0) { this.raise(this.awaitIdentPos, "Cannot use 'await' as identifier inside an async function"); } this.yieldPos = oldYieldPos; this.awaitPos = oldAwaitPos; this.awaitIdentPos = oldAwaitIdentPos; return this.parseSubscriptAsyncArrow(startPos, startLoc, exprList, forInit) } this.checkExpressionErrors(refDestructuringErrors, true); this.yieldPos = oldYieldPos || this.yieldPos; this.awaitPos = oldAwaitPos || this.awaitPos; this.awaitIdentPos = oldAwaitIdentPos || this.awaitIdentPos; var node$1 = this.startNodeAt(startPos, startLoc); node$1.callee = base; node$1.arguments = exprList; if (optionalSupported) { node$1.optional = optional; } base = this.finishNode(node$1, "CallExpression"); } else if (this.type === types$1.backQuote) { if (optional || optionalChained) { this.raise(this.start, "Optional chaining cannot appear in the tag of tagged template expressions"); } var node$2 = this.startNodeAt(startPos, startLoc); node$2.tag = base; node$2.quasi = this.parseTemplate({isTagged: true}); base = this.finishNode(node$2, "TaggedTemplateExpression"); } return base }; // Parse an atomic expression — either a single token that is an // expression, an expression started by a keyword like `function` or // `new`, or an expression wrapped in punctuation like `()`, `[]`, // or `{}`. pp$5.parseExprAtom = function(refDestructuringErrors, forInit, forNew) { // If a division operator appears in an expression position, the // tokenizer got confused, and we force it to read a regexp instead. if (this.type === types$1.slash) { this.readRegexp(); } var node, canBeArrow = this.potentialArrowAt === this.start; switch (this.type) { case types$1._super: if (!this.allowSuper) { this.raise(this.start, "'super' keyword outside a method"); } node = this.startNode(); this.next(); if (this.type === types$1.parenL && !this.allowDirectSuper) { this.raise(node.start, "super() call outside constructor of a subclass"); } // The `super` keyword can appear at below: // SuperProperty: // super [ Expression ] // super . 
IdentifierName // SuperCall: // super ( Arguments ) if (this.type !== types$1.dot && this.type !== types$1.bracketL && this.type !== types$1.parenL) { this.unexpected(); } return this.finishNode(node, "Super") case types$1._this: node = this.startNode(); this.next(); return this.finishNode(node, "ThisExpression") case types$1.name: var startPos = this.start, startLoc = this.startLoc, containsEsc = this.containsEsc; var id = this.parseIdent(false); if (this.options.ecmaVersion >= 8 && !containsEsc && id.name === "async" && !this.canInsertSemicolon() && this.eat(types$1._function)) { this.overrideContext(types.f_expr); return this.parseFunction(this.startNodeAt(startPos, startLoc), 0, false, true, forInit) } if (canBeArrow && !this.canInsertSemicolon()) { if (this.eat(types$1.arrow)) { return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), [id], false, forInit) } if (this.options.ecmaVersion >= 8 && id.name === "async" && this.type === types$1.name && !containsEsc && (!this.potentialArrowInForAwait || this.value !== "of" || this.containsEsc)) { id = this.parseIdent(false); if (this.canInsertSemicolon() || !this.eat(types$1.arrow)) { this.unexpected(); } return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), [id], true, forInit) } } return id case types$1.regexp: var value = this.value; node = this.parseLiteral(value.value); node.regex = {pattern: value.pattern, flags: value.flags}; return node case types$1.num: case types$1.string: return this.parseLiteral(this.value) case types$1._null: case types$1._true: case types$1._false: node = this.startNode(); node.value = this.type === types$1._null ? null : this.type === types$1._true; node.raw = this.type.keyword; this.next(); return this.finishNode(node, "Literal") case types$1.parenL: var start = this.start, expr = this.parseParenAndDistinguishExpression(canBeArrow, forInit); if (refDestructuringErrors) { if (refDestructuringErrors.parenthesizedAssign < 0 && !this.isSimpleAssignTarget(expr)) { refDestructuringErrors.parenthesizedAssign = start; } if (refDestructuringErrors.parenthesizedBind < 0) { refDestructuringErrors.parenthesizedBind = start; } } return expr case types$1.bracketL: node = this.startNode(); this.next(); node.elements = this.parseExprList(types$1.bracketR, true, true, refDestructuringErrors); return this.finishNode(node, "ArrayExpression") case types$1.braceL: this.overrideContext(types.b_expr); return this.parseObj(false, refDestructuringErrors) case types$1._function: node = this.startNode(); this.next(); return this.parseFunction(node, 0) case types$1._class: return this.parseClass(this.startNode(), false) case types$1._new: return this.parseNew() case types$1.backQuote: return this.parseTemplate() case types$1._import: if (this.options.ecmaVersion >= 11) { return this.parseExprImport(forNew) } else { return this.unexpected() } default: return this.parseExprAtomDefault() } }; pp$5.parseExprAtomDefault = function() { this.unexpected(); }; pp$5.parseExprImport = function(forNew) { var node = this.startNode(); // Consume `import` as an identifier for `import.meta`. // Because `this.parseIdent(true)` doesn't check escape sequences, it needs the check of `this.containsEsc`. 
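// Illustrative aside (hypothetical inputs, shapes as produced by the two
// branches below):
//   import("./mod.js")  -> ImportExpression {source: Literal "./mod.js"}
//   import.meta.url     -> MemberExpression whose object is
//                          MetaProperty {meta: Identifier "import", property: Identifier "meta"}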
if (this.containsEsc) { this.raiseRecoverable(this.start, "Escape sequence in keyword import"); } this.next(); if (this.type === types$1.parenL && !forNew) { return this.parseDynamicImport(node) } else if (this.type === types$1.dot) { var meta = this.startNodeAt(node.start, node.loc && node.loc.start); meta.name = "import"; node.meta = this.finishNode(meta, "Identifier"); return this.parseImportMeta(node) } else { this.unexpected(); } }; pp$5.parseDynamicImport = function(node) { this.next(); // skip `(` // Parse node.source. node.source = this.parseMaybeAssign(); // Verify ending. if (!this.eat(types$1.parenR)) { var errorPos = this.start; if (this.eat(types$1.comma) && this.eat(types$1.parenR)) { this.raiseRecoverable(errorPos, "Trailing comma is not allowed in import()"); } else { this.unexpected(errorPos); } } return this.finishNode(node, "ImportExpression") }; pp$5.parseImportMeta = function(node) { this.next(); // skip `.` var containsEsc = this.containsEsc; node.property = this.parseIdent(true); if (node.property.name !== "meta") { this.raiseRecoverable(node.property.start, "The only valid meta property for import is 'import.meta'"); } if (containsEsc) { this.raiseRecoverable(node.start, "'import.meta' must not contain escaped characters"); } if (this.options.sourceType !== "module" && !this.options.allowImportExportEverywhere) { this.raiseRecoverable(node.start, "Cannot use 'import.meta' outside a module"); } return this.finishNode(node, "MetaProperty") }; pp$5.parseLiteral = function(value) { var node = this.startNode(); node.value = value; node.raw = this.input.slice(this.start, this.end); if (node.raw.charCodeAt(node.raw.length - 1) === 110) { node.bigint = node.raw.slice(0, -1).replace(/_/g, ""); } this.next(); return this.finishNode(node, "Literal") }; pp$5.parseParenExpression = function() { this.expect(types$1.parenL); var val = this.parseExpression(); this.expect(types$1.parenR); return val }; pp$5.shouldParseArrow = function(exprList) { return !this.canInsertSemicolon() }; pp$5.parseParenAndDistinguishExpression = function(canBeArrow, forInit) { var startPos = this.start, startLoc = this.startLoc, val, allowTrailingComma = this.options.ecmaVersion >= 8; if (this.options.ecmaVersion >= 6) { this.next(); var innerStartPos = this.start, innerStartLoc = this.startLoc; var exprList = [], first = true, lastIsComma = false; var refDestructuringErrors = new DestructuringErrors, oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, spreadStart; this.yieldPos = 0; this.awaitPos = 0; // Do not save awaitIdentPos to allow checking awaits nested in parameters while (this.type !== types$1.parenR) { first ? 
first = false : this.expect(types$1.comma); if (allowTrailingComma && this.afterTrailingComma(types$1.parenR, true)) { lastIsComma = true; break } else if (this.type === types$1.ellipsis) { spreadStart = this.start; exprList.push(this.parseParenItem(this.parseRestBinding())); if (this.type === types$1.comma) { this.raiseRecoverable( this.start, "Comma is not permitted after the rest element" ); } break } else { exprList.push(this.parseMaybeAssign(false, refDestructuringErrors, this.parseParenItem)); } } var innerEndPos = this.lastTokEnd, innerEndLoc = this.lastTokEndLoc; this.expect(types$1.parenR); if (canBeArrow && this.shouldParseArrow(exprList) && this.eat(types$1.arrow)) { this.checkPatternErrors(refDestructuringErrors, false); this.checkYieldAwaitInDefaultParams(); this.yieldPos = oldYieldPos; this.awaitPos = oldAwaitPos; return this.parseParenArrowList(startPos, startLoc, exprList, forInit) } if (!exprList.length || lastIsComma) { this.unexpected(this.lastTokStart); } if (spreadStart) { this.unexpected(spreadStart); } this.checkExpressionErrors(refDestructuringErrors, true); this.yieldPos = oldYieldPos || this.yieldPos; this.awaitPos = oldAwaitPos || this.awaitPos; if (exprList.length > 1) { val = this.startNodeAt(innerStartPos, innerStartLoc); val.expressions = exprList; this.finishNodeAt(val, "SequenceExpression", innerEndPos, innerEndLoc); } else { val = exprList[0]; } } else { val = this.parseParenExpression(); } if (this.options.preserveParens) { var par = this.startNodeAt(startPos, startLoc); par.expression = val; return this.finishNode(par, "ParenthesizedExpression") } else { return val } }; pp$5.parseParenItem = function(item) { return item }; pp$5.parseParenArrowList = function(startPos, startLoc, exprList, forInit) { return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), exprList, false, forInit) }; // New's precedence is slightly tricky. It must allow its argument to // be a `[]` or dot subscript expression, but not a call — at least, // not without wrapping it in parentheses. Thus, it uses the noCalls // argument to parseSubscripts to prevent it from consuming the // argument list. var empty = []; pp$5.parseNew = function() { if (this.containsEsc) { this.raiseRecoverable(this.start, "Escape sequence in keyword new"); } var node = this.startNode(); this.next(); if (this.options.ecmaVersion >= 6 && this.type === types$1.dot) { var meta = this.startNodeAt(node.start, node.loc && node.loc.start); meta.name = "new"; node.meta = this.finishNode(meta, "Identifier"); this.next(); var containsEsc = this.containsEsc; node.property = this.parseIdent(true); if (node.property.name !== "target") { this.raiseRecoverable(node.property.start, "The only valid meta property for new is 'new.target'"); } if (containsEsc) { this.raiseRecoverable(node.start, "'new.target' must not contain escaped characters"); } if (!this.allowNewDotTarget) { this.raiseRecoverable(node.start, "'new.target' can only be used in functions and class static block"); } return this.finishNode(node, "MetaProperty") } var startPos = this.start, startLoc = this.startLoc; node.callee = this.parseSubscripts(this.parseExprAtom(null, false, true), startPos, startLoc, true, false); if (this.eat(types$1.parenL)) { node.arguments = this.parseExprList(types$1.parenR, this.options.ecmaVersion >= 8, false); } else { node.arguments = empty; } return this.finishNode(node, "NewExpression") }; // Parse template expression. 
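// Illustrative aside (hypothetical input, standard ESTree shapes assumed):
//   `a${x}b`  ->  TemplateLiteral
//                   quasis: [TemplateElement {value: {raw: "a", cooked: "a"}, tail: false},
//                            TemplateElement {value: {raw: "b", cooked: "b"}, tail: true}]
//                   expressions: [Identifier x]
// In a tagged template the same literal becomes the `quasi` of a
// TaggedTemplateExpression, and an invalid escape merely sets `cooked` to null
// instead of raising, which is what the `isTagged` flag below controls.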
pp$5.parseTemplateElement = function(ref) { var isTagged = ref.isTagged; var elem = this.startNode(); if (this.type === types$1.invalidTemplate) { if (!isTagged) { this.raiseRecoverable(this.start, "Bad escape sequence in untagged template literal"); } elem.value = { raw: this.value.replace(/\r\n?/g, "\n"), cooked: null }; } else { elem.value = { raw: this.input.slice(this.start, this.end).replace(/\r\n?/g, "\n"), cooked: this.value }; } this.next(); elem.tail = this.type === types$1.backQuote; return this.finishNode(elem, "TemplateElement") }; pp$5.parseTemplate = function(ref) { if ( ref === void 0 ) ref = {}; var isTagged = ref.isTagged; if ( isTagged === void 0 ) isTagged = false; var node = this.startNode(); this.next(); node.expressions = []; var curElt = this.parseTemplateElement({isTagged: isTagged}); node.quasis = [curElt]; while (!curElt.tail) { if (this.type === types$1.eof) { this.raise(this.pos, "Unterminated template literal"); } this.expect(types$1.dollarBraceL); node.expressions.push(this.parseExpression()); this.expect(types$1.braceR); node.quasis.push(curElt = this.parseTemplateElement({isTagged: isTagged})); } this.next(); return this.finishNode(node, "TemplateLiteral") }; pp$5.isAsyncProp = function(prop) { return !prop.computed && prop.key.type === "Identifier" && prop.key.name === "async" && (this.type === types$1.name || this.type === types$1.num || this.type === types$1.string || this.type === types$1.bracketL || this.type.keyword || (this.options.ecmaVersion >= 9 && this.type === types$1.star)) && !lineBreak.test(this.input.slice(this.lastTokEnd, this.start)) }; // Parse an object literal or binding pattern. pp$5.parseObj = function(isPattern, refDestructuringErrors) { var node = this.startNode(), first = true, propHash = {}; node.properties = []; this.next(); while (!this.eat(types$1.braceR)) { if (!first) { this.expect(types$1.comma); if (this.options.ecmaVersion >= 5 && this.afterTrailingComma(types$1.braceR)) { break } } else { first = false; } var prop = this.parseProperty(isPattern, refDestructuringErrors); if (!isPattern) { this.checkPropClash(prop, propHash, refDestructuringErrors); } node.properties.push(prop); } return this.finishNode(node, isPattern ? "ObjectPattern" : "ObjectExpression") }; pp$5.parseProperty = function(isPattern, refDestructuringErrors) { var prop = this.startNode(), isGenerator, isAsync, startPos, startLoc; if (this.options.ecmaVersion >= 9 && this.eat(types$1.ellipsis)) { if (isPattern) { prop.argument = this.parseIdent(false); if (this.type === types$1.comma) { this.raiseRecoverable(this.start, "Comma is not permitted after the rest element"); } return this.finishNode(prop, "RestElement") } // Parse argument. prop.argument = this.parseMaybeAssign(false, refDestructuringErrors); // To disallow trailing comma via `this.toAssignable()`. 
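// Illustrative aside (hypothetical sources): the `...` handled above ends up as
// different nodes depending on what the surrounding object turns out to be:
//   ({...rest} = obj)  -> RestElement inside an ObjectPattern (after toAssignable)
//   ({...spread})      -> SpreadElement inside an ObjectExpression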
if (this.type === types$1.comma && refDestructuringErrors && refDestructuringErrors.trailingComma < 0) { refDestructuringErrors.trailingComma = this.start; } // Finish return this.finishNode(prop, "SpreadElement") } if (this.options.ecmaVersion >= 6) { prop.method = false; prop.shorthand = false; if (isPattern || refDestructuringErrors) { startPos = this.start; startLoc = this.startLoc; } if (!isPattern) { isGenerator = this.eat(types$1.star); } } var containsEsc = this.containsEsc; this.parsePropertyName(prop); if (!isPattern && !containsEsc && this.options.ecmaVersion >= 8 && !isGenerator && this.isAsyncProp(prop)) { isAsync = true; isGenerator = this.options.ecmaVersion >= 9 && this.eat(types$1.star); this.parsePropertyName(prop); } else { isAsync = false; } this.parsePropertyValue(prop, isPattern, isGenerator, isAsync, startPos, startLoc, refDestructuringErrors, containsEsc); return this.finishNode(prop, "Property") }; pp$5.parseGetterSetter = function(prop) { prop.kind = prop.key.name; this.parsePropertyName(prop); prop.value = this.parseMethod(false); var paramCount = prop.kind === "get" ? 0 : 1; if (prop.value.params.length !== paramCount) { var start = prop.value.start; if (prop.kind === "get") { this.raiseRecoverable(start, "getter should have no params"); } else { this.raiseRecoverable(start, "setter should have exactly one param"); } } else { if (prop.kind === "set" && prop.value.params[0].type === "RestElement") { this.raiseRecoverable(prop.value.params[0].start, "Setter cannot use rest params"); } } }; pp$5.parsePropertyValue = function(prop, isPattern, isGenerator, isAsync, startPos, startLoc, refDestructuringErrors, containsEsc) { if ((isGenerator || isAsync) && this.type === types$1.colon) { this.unexpected(); } if (this.eat(types$1.colon)) { prop.value = isPattern ? 
this.parseMaybeDefault(this.start, this.startLoc) : this.parseMaybeAssign(false, refDestructuringErrors); prop.kind = "init"; } else if (this.options.ecmaVersion >= 6 && this.type === types$1.parenL) { if (isPattern) { this.unexpected(); } prop.kind = "init"; prop.method = true; prop.value = this.parseMethod(isGenerator, isAsync); } else if (!isPattern && !containsEsc && this.options.ecmaVersion >= 5 && !prop.computed && prop.key.type === "Identifier" && (prop.key.name === "get" || prop.key.name === "set") && (this.type !== types$1.comma && this.type !== types$1.braceR && this.type !== types$1.eq)) { if (isGenerator || isAsync) { this.unexpected(); } this.parseGetterSetter(prop); } else if (this.options.ecmaVersion >= 6 && !prop.computed && prop.key.type === "Identifier") { if (isGenerator || isAsync) { this.unexpected(); } this.checkUnreserved(prop.key); if (prop.key.name === "await" && !this.awaitIdentPos) { this.awaitIdentPos = startPos; } prop.kind = "init"; if (isPattern) { prop.value = this.parseMaybeDefault(startPos, startLoc, this.copyNode(prop.key)); } else if (this.type === types$1.eq && refDestructuringErrors) { if (refDestructuringErrors.shorthandAssign < 0) { refDestructuringErrors.shorthandAssign = this.start; } prop.value = this.parseMaybeDefault(startPos, startLoc, this.copyNode(prop.key)); } else { prop.value = this.copyNode(prop.key); } prop.shorthand = true; } else { this.unexpected(); } }; pp$5.parsePropertyName = function(prop) { if (this.options.ecmaVersion >= 6) { if (this.eat(types$1.bracketL)) { prop.computed = true; prop.key = this.parseMaybeAssign(); this.expect(types$1.bracketR); return prop.key } else { prop.computed = false; } } return prop.key = this.type === types$1.num || this.type === types$1.string ? this.parseExprAtom() : this.parseIdent(this.options.allowReserved !== "never") }; // Initialize empty function node. pp$5.initFunction = function(node) { node.id = null; if (this.options.ecmaVersion >= 6) { node.generator = node.expression = false; } if (this.options.ecmaVersion >= 8) { node.async = false; } }; // Parse object or class method. pp$5.parseMethod = function(isGenerator, isAsync, allowDirectSuper) { var node = this.startNode(), oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldAwaitIdentPos = this.awaitIdentPos; this.initFunction(node); if (this.options.ecmaVersion >= 6) { node.generator = isGenerator; } if (this.options.ecmaVersion >= 8) { node.async = !!isAsync; } this.yieldPos = 0; this.awaitPos = 0; this.awaitIdentPos = 0; this.enterScope(functionFlags(isAsync, node.generator) | SCOPE_SUPER | (allowDirectSuper ? SCOPE_DIRECT_SUPER : 0)); this.expect(types$1.parenL); node.params = this.parseBindingList(types$1.parenR, false, this.options.ecmaVersion >= 8); this.checkYieldAwaitInDefaultParams(); this.parseFunctionBody(node, false, true, false); this.yieldPos = oldYieldPos; this.awaitPos = oldAwaitPos; this.awaitIdentPos = oldAwaitIdentPos; return this.finishNode(node, "FunctionExpression") }; // Parse arrow function expression with given parameters. 
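// Illustrative aside (hypothetical input, standard ESTree shapes assumed):
// `(a, b = 1) => a + b` becomes
//   ArrowFunctionExpression
//     params: [Identifier a, AssignmentPattern {left: Identifier b, right: Literal 1}]
//     expression: true              // concise body, no braces
//     body: BinaryExpression a + b
// The parenthesized list is first parsed as ordinary expressions and only then
// reinterpreted as binding patterns via toAssignableList below.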
pp$5.parseArrowExpression = function(node, params, isAsync, forInit) { var oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldAwaitIdentPos = this.awaitIdentPos; this.enterScope(functionFlags(isAsync, false) | SCOPE_ARROW); this.initFunction(node); if (this.options.ecmaVersion >= 8) { node.async = !!isAsync; } this.yieldPos = 0; this.awaitPos = 0; this.awaitIdentPos = 0; node.params = this.toAssignableList(params, true); this.parseFunctionBody(node, true, false, forInit); this.yieldPos = oldYieldPos; this.awaitPos = oldAwaitPos; this.awaitIdentPos = oldAwaitIdentPos; return this.finishNode(node, "ArrowFunctionExpression") }; // Parse function body and check parameters. pp$5.parseFunctionBody = function(node, isArrowFunction, isMethod, forInit) { var isExpression = isArrowFunction && this.type !== types$1.braceL; var oldStrict = this.strict, useStrict = false; if (isExpression) { node.body = this.parseMaybeAssign(forInit); node.expression = true; this.checkParams(node, false); } else { var nonSimple = this.options.ecmaVersion >= 7 && !this.isSimpleParamList(node.params); if (!oldStrict || nonSimple) { useStrict = this.strictDirective(this.end); // If this is a strict mode function, verify that argument names // are not repeated, and it does not try to bind the words `eval` // or `arguments`. if (useStrict && nonSimple) { this.raiseRecoverable(node.start, "Illegal 'use strict' directive in function with non-simple parameter list"); } } // Start a new scope with regard to labels and the `inFunction` // flag (restore them to their old value afterwards). var oldLabels = this.labels; this.labels = []; if (useStrict) { this.strict = true; } // Add the params to varDeclaredNames to ensure that an error is thrown // if a let/const declaration in the function clashes with one of the params. this.checkParams(node, !oldStrict && !useStrict && !isArrowFunction && !isMethod && this.isSimpleParamList(node.params)); // Ensure the function name isn't a forbidden identifier in strict mode, e.g. 'eval' if (this.strict && node.id) { this.checkLValSimple(node.id, BIND_OUTSIDE); } node.body = this.parseBlock(false, undefined, useStrict && !oldStrict); node.expression = false; this.adaptDirectivePrologue(node.body.body); this.labels = oldLabels; } this.exitScope(); }; pp$5.isSimpleParamList = function(params) { for (var i = 0, list = params; i < list.length; i += 1) { var param = list[i]; if (param.type !== "Identifier") { return false } } return true }; // Checks function params for various disallowed patterns such as using "eval" // or "arguments" and duplicate parameters. pp$5.checkParams = function(node, allowDuplicates) { var nameHash = Object.create(null); for (var i = 0, list = node.params; i < list.length; i += 1) { var param = list[i]; this.checkLValInnerPattern(param, BIND_VAR, allowDuplicates ? null : nameHash); } }; // Parses a comma-separated list of expressions, and returns them as // an array. `close` is the token type that ends the list, and // `allowEmpty` can be turned on to allow subsequent commas with // nothing in between them to be parsed as `null` (which is needed // for array literals). 
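// Illustrative aside on the `allowEmpty` behaviour described above (hypothetical
// input): an elision in an array literal shows up as a null slot,
//   [1, , 3]  ->  ArrayExpression {elements: [Literal 1, null, Literal 3]}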
pp$5.parseExprList = function(close, allowTrailingComma, allowEmpty, refDestructuringErrors) { var elts = [], first = true; while (!this.eat(close)) { if (!first) { this.expect(types$1.comma); if (allowTrailingComma && this.afterTrailingComma(close)) { break } } else { first = false; } var elt = (void 0); if (allowEmpty && this.type === types$1.comma) { elt = null; } else if (this.type === types$1.ellipsis) { elt = this.parseSpread(refDestructuringErrors); if (refDestructuringErrors && this.type === types$1.comma && refDestructuringErrors.trailingComma < 0) { refDestructuringErrors.trailingComma = this.start; } } else { elt = this.parseMaybeAssign(false, refDestructuringErrors); } elts.push(elt); } return elts }; pp$5.checkUnreserved = function(ref) { var start = ref.start; var end = ref.end; var name = ref.name; if (this.inGenerator && name === "yield") { this.raiseRecoverable(start, "Cannot use 'yield' as identifier inside a generator"); } if (this.inAsync && name === "await") { this.raiseRecoverable(start, "Cannot use 'await' as identifier inside an async function"); } if (this.currentThisScope().inClassFieldInit && name === "arguments") { this.raiseRecoverable(start, "Cannot use 'arguments' in class field initializer"); } if (this.inClassStaticBlock && (name === "arguments" || name === "await")) { this.raise(start, ("Cannot use " + name + " in class static initialization block")); } if (this.keywords.test(name)) { this.raise(start, ("Unexpected keyword '" + name + "'")); } if (this.options.ecmaVersion < 6 && this.input.slice(start, end).indexOf("\\") !== -1) { return } var re = this.strict ? this.reservedWordsStrict : this.reservedWords; if (re.test(name)) { if (!this.inAsync && name === "await") { this.raiseRecoverable(start, "Cannot use keyword 'await' outside an async function"); } this.raiseRecoverable(start, ("The keyword '" + name + "' is reserved")); } }; // Parse the next token as an identifier. If `liberal` is true (used // when parsing properties), it will also convert keywords into // identifiers. pp$5.parseIdent = function(liberal) { var node = this.parseIdentNode(); this.next(!!liberal); this.finishNode(node, "Identifier"); if (!liberal) { this.checkUnreserved(node); if (node.name === "await" && !this.awaitIdentPos) { this.awaitIdentPos = node.start; } } return node }; pp$5.parseIdentNode = function() { var node = this.startNode(); if (this.type === types$1.name) { node.name = this.value; } else if (this.type.keyword) { node.name = this.type.keyword; // To fix https://github.com/acornjs/acorn/issues/575 // `class` and `function` keywords push new context into this.context. // But there is no chance to pop the context if the keyword is consumed as an identifier such as a property name. 
// If the previous token is a dot, this does not apply because the context-managing code already ignored the keyword if ((node.name === "class" || node.name === "function") && (this.lastTokEnd !== this.lastTokStart + 1 || this.input.charCodeAt(this.lastTokStart) !== 46)) { this.context.pop(); } this.type = types$1.name; } else { this.unexpected(); } return node }; pp$5.parsePrivateIdent = function() { var node = this.startNode(); if (this.type === types$1.privateId) { node.name = this.value; } else { this.unexpected(); } this.next(); this.finishNode(node, "PrivateIdentifier"); // For validating existence if (this.options.checkPrivateFields) { if (this.privateNameStack.length === 0) { this.raise(node.start, ("Private field '#" + (node.name) + "' must be declared in an enclosing class")); } else { this.privateNameStack[this.privateNameStack.length - 1].used.push(node); } } return node }; // Parses yield expression inside generator. pp$5.parseYield = function(forInit) { if (!this.yieldPos) { this.yieldPos = this.start; } var node = this.startNode(); this.next(); if (this.type === types$1.semi || this.canInsertSemicolon() || (this.type !== types$1.star && !this.type.startsExpr)) { node.delegate = false; node.argument = null; } else { node.delegate = this.eat(types$1.star); node.argument = this.parseMaybeAssign(forInit); } return this.finishNode(node, "YieldExpression") }; pp$5.parseAwait = function(forInit) { if (!this.awaitPos) { this.awaitPos = this.start; } var node = this.startNode(); this.next(); node.argument = this.parseMaybeUnary(null, true, false, forInit); return this.finishNode(node, "AwaitExpression") }; var pp$4 = Parser.prototype; // This function is used to raise exceptions on parse errors. It // takes an offset integer (into the current `input`) to indicate // the location of the error, attaches the position to the end // of the error message, and then raises a `SyntaxError` with that // message. pp$4.raise = function(pos, message) { var loc = getLineInfo(this.input, pos); message += " (" + loc.line + ":" + loc.column + ")"; var err = new SyntaxError(message); err.pos = pos; err.loc = loc; err.raisedAt = this.pos; throw err }; pp$4.raiseRecoverable = pp$4.raise; pp$4.curPosition = function() { if (this.options.locations) { return new Position(this.curLine, this.pos - this.lineStart) } }; var pp$3 = Parser.prototype; var Scope = function Scope(flags) { this.flags = flags; // A list of var-declared names in the current lexical scope this.var = []; // A list of lexically-declared names in the current lexical scope this.lexical = []; // A list of lexically-declared FunctionDeclaration names in the current lexical scope this.functions = []; // A switch to disallow the identifier reference 'arguments' this.inClassFieldInit = false; }; // The functions in this module keep track of declared variables in the current scope in order to detect duplicate variable names. pp$3.enterScope = function(flags) { this.scopeStack.push(new Scope(flags)); }; pp$3.exitScope = function() { this.scopeStack.pop(); }; // The spec says: // > At the top level of a function, or script, function declarations are // > treated like var declarations rather than like lexical declarations. 
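// Illustrative aside on the scope bookkeeping described above. The helper below
// is a hypothetical, self-contained miniature of the same idea; it is defined
// for illustration only, is never invoked by this bundle, and is not the
// parser's actual implementation.
function _exampleScopeStackSketch() {
  var scopes = []; // innermost scope is last
  function enter(isFunctionScope) {
    scopes.push({var: [], lexical: [], functionScope: !!isFunctionScope});
  }
  function exit() { scopes.pop(); }
  function declareLexical(name) {
    var scope = scopes[scopes.length - 1];
    // `let`/`const` may not repeat any name already bound in the same scope.
    if (scope.lexical.indexOf(name) > -1 || scope.var.indexOf(name) > -1) {
      throw new SyntaxError("Identifier '" + name + "' has already been declared");
    }
    scope.lexical.push(name);
  }
  function declareVar(name) {
    // `var` hoists: record it in every scope out to the nearest function scope,
    // and treat a clash with a lexical binding along the way as a redeclaration.
    for (var i = scopes.length - 1; i >= 0; i--) {
      var scope = scopes[i];
      if (scope.lexical.indexOf(name) > -1) {
        throw new SyntaxError("Identifier '" + name + "' has already been declared");
      }
      scope.var.push(name);
      if (scope.functionScope) { break }
    }
  }
  enter(true); // the outermost scope behaves like a function scope here
  return {enter: enter, exit: exit, declareLexical: declareLexical, declareVar: declareVar};
}
// e.g. calling declareLexical("x") twice in the same scope would throw, which
// mirrors the redeclaration check in declareName below.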
pp$3.treatFunctionsAsVarInScope = function(scope) { return (scope.flags & SCOPE_FUNCTION) || !this.inModule && (scope.flags & SCOPE_TOP) }; pp$3.declareName = function(name, bindingType, pos) { var redeclared = false; if (bindingType === BIND_LEXICAL) { var scope = this.currentScope(); redeclared = scope.lexical.indexOf(name) > -1 || scope.functions.indexOf(name) > -1 || scope.var.indexOf(name) > -1; scope.lexical.push(name); if (this.inModule && (scope.flags & SCOPE_TOP)) { delete this.undefinedExports[name]; } } else if (bindingType === BIND_SIMPLE_CATCH) { var scope$1 = this.currentScope(); scope$1.lexical.push(name); } else if (bindingType === BIND_FUNCTION) { var scope$2 = this.currentScope(); if (this.treatFunctionsAsVar) { redeclared = scope$2.lexical.indexOf(name) > -1; } else { redeclared = scope$2.lexical.indexOf(name) > -1 || scope$2.var.indexOf(name) > -1; } scope$2.functions.push(name); } else { for (var i = this.scopeStack.length - 1; i >= 0; --i) { var scope$3 = this.scopeStack[i]; if (scope$3.lexical.indexOf(name) > -1 && !((scope$3.flags & SCOPE_SIMPLE_CATCH) && scope$3.lexical[0] === name) || !this.treatFunctionsAsVarInScope(scope$3) && scope$3.functions.indexOf(name) > -1) { redeclared = true; break } scope$3.var.push(name); if (this.inModule && (scope$3.flags & SCOPE_TOP)) { delete this.undefinedExports[name]; } if (scope$3.flags & SCOPE_VAR) { break } } } if (redeclared) { this.raiseRecoverable(pos, ("Identifier '" + name + "' has already been declared")); } }; pp$3.checkLocalExport = function(id) { // scope.functions must be empty as Module code is always strict. if (this.scopeStack[0].lexical.indexOf(id.name) === -1 && this.scopeStack[0].var.indexOf(id.name) === -1) { this.undefinedExports[id.name] = id; } }; pp$3.currentScope = function() { return this.scopeStack[this.scopeStack.length - 1] }; pp$3.currentVarScope = function() { for (var i = this.scopeStack.length - 1;; i--) { var scope = this.scopeStack[i]; if (scope.flags & SCOPE_VAR) { return scope } } }; // Could be useful for `this`, `new.target`, `super()`, `super.property`, and `super[property]`. pp$3.currentThisScope = function() { for (var i = this.scopeStack.length - 1;; i--) { var scope = this.scopeStack[i]; if (scope.flags & SCOPE_VAR && !(scope.flags & SCOPE_ARROW)) { return scope } } }; var Node = function Node(parser, pos, loc) { this.type = ""; this.start = pos; this.end = 0; if (parser.options.locations) { this.loc = new SourceLocation(parser, loc); } if (parser.options.directSourceFile) { this.sourceFile = parser.options.directSourceFile; } if (parser.options.ranges) { this.range = [pos, 0]; } }; // Start an AST node, attaching a start offset. var pp$2 = Parser.prototype; pp$2.startNode = function() { return new Node(this, this.start, this.startLoc) }; pp$2.startNodeAt = function(pos, loc) { return new Node(this, pos, loc) }; // Finish an AST node, adding `type` and `end` properties. 
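// Illustrative aside (hypothetical source `foo`, parsed with the `locations`
// and `ranges` options handled above): the finished Identifier node carries,
// among other fields,
//   {type: "Identifier", name: "foo",
//    start: 0, end: 3,
//    range: [0, 3],
//    loc: {start: {line: 1, column: 0}, end: {line: 1, column: 3}}}
// startNode records the start offsets; finishNode below fills in `type`, `end`,
// and the end positions.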
function finishNodeAt(node, type, pos, loc) { node.type = type; node.end = pos; if (this.options.locations) { node.loc.end = loc; } if (this.options.ranges) { node.range[1] = pos; } return node } pp$2.finishNode = function(node, type) { return finishNodeAt.call(this, node, type, this.lastTokEnd, this.lastTokEndLoc) }; // Finish node at given position pp$2.finishNodeAt = function(node, type, pos, loc) { return finishNodeAt.call(this, node, type, pos, loc) }; pp$2.copyNode = function(node) { var newNode = new Node(this, node.start, this.startLoc); for (var prop in node) { newNode[prop] = node[prop]; } return newNode }; // This file contains Unicode properties extracted from the ECMAScript specification. // The lists are extracted like so: // $$('#table-binary-unicode-properties > figure > table > tbody > tr > td:nth-child(1) code').map(el => el.innerText) // #table-binary-unicode-properties var ecma9BinaryProperties = "ASCII ASCII_Hex_Digit AHex Alphabetic Alpha Any Assigned Bidi_Control Bidi_C Bidi_Mirrored Bidi_M Case_Ignorable CI Cased Changes_When_Casefolded CWCF Changes_When_Casemapped CWCM Changes_When_Lowercased CWL Changes_When_NFKC_Casefolded CWKCF Changes_When_Titlecased CWT Changes_When_Uppercased CWU Dash Default_Ignorable_Code_Point DI Deprecated Dep Diacritic Dia Emoji Emoji_Component Emoji_Modifier Emoji_Modifier_Base Emoji_Presentation Extender Ext Grapheme_Base Gr_Base Grapheme_Extend Gr_Ext Hex_Digit Hex IDS_Binary_Operator IDSB IDS_Trinary_Operator IDST ID_Continue IDC ID_Start IDS Ideographic Ideo Join_Control Join_C Logical_Order_Exception LOE Lowercase Lower Math Noncharacter_Code_Point NChar Pattern_Syntax Pat_Syn Pattern_White_Space Pat_WS Quotation_Mark QMark Radical Regional_Indicator RI Sentence_Terminal STerm Soft_Dotted SD Terminal_Punctuation Term Unified_Ideograph UIdeo Uppercase Upper Variation_Selector VS White_Space space XID_Continue XIDC XID_Start XIDS"; var ecma10BinaryProperties = ecma9BinaryProperties + " Extended_Pictographic"; var ecma11BinaryProperties = ecma10BinaryProperties; var ecma12BinaryProperties = ecma11BinaryProperties + " EBase EComp EMod EPres ExtPict"; var ecma13BinaryProperties = ecma12BinaryProperties; var ecma14BinaryProperties = ecma13BinaryProperties; var unicodeBinaryProperties = { 9: ecma9BinaryProperties, 10: ecma10BinaryProperties, 11: ecma11BinaryProperties, 12: ecma12BinaryProperties, 13: ecma13BinaryProperties, 14: ecma14BinaryProperties }; // #table-binary-unicode-properties-of-strings var ecma14BinaryPropertiesOfStrings = "Basic_Emoji Emoji_Keycap_Sequence RGI_Emoji_Modifier_Sequence RGI_Emoji_Flag_Sequence RGI_Emoji_Tag_Sequence RGI_Emoji_ZWJ_Sequence RGI_Emoji"; var unicodeBinaryPropertiesOfStrings = { 9: "", 10: "", 11: "", 12: "", 13: "", 14: ecma14BinaryPropertiesOfStrings }; // #table-unicode-general-category-values var unicodeGeneralCategoryValues = "Cased_Letter LC Close_Punctuation Pe Connector_Punctuation Pc Control Cc cntrl Currency_Symbol Sc Dash_Punctuation Pd Decimal_Number Nd digit Enclosing_Mark Me Final_Punctuation Pf Format Cf Initial_Punctuation Pi Letter L Letter_Number Nl Line_Separator Zl Lowercase_Letter Ll Mark M Combining_Mark Math_Symbol Sm Modifier_Letter Lm Modifier_Symbol Sk Nonspacing_Mark Mn Number N Open_Punctuation Ps Other C Other_Letter Lo Other_Number No Other_Punctuation Po Other_Symbol So Paragraph_Separator Zp Private_Use Co Punctuation P punct Separator Z Space_Separator Zs Spacing_Mark Mc Surrogate Cs Symbol S Titlecase_Letter Lt Unassigned Cn Uppercase_Letter Lu"; // 
#table-unicode-script-values var ecma9ScriptValues = "Adlam Adlm Ahom Anatolian_Hieroglyphs Hluw Arabic Arab Armenian Armn Avestan Avst Balinese Bali Bamum Bamu Bassa_Vah Bass Batak Batk Bengali Beng Bhaiksuki Bhks Bopomofo Bopo Brahmi Brah Braille Brai Buginese Bugi Buhid Buhd Canadian_Aboriginal Cans Carian Cari Caucasian_Albanian Aghb Chakma Cakm Cham Cham Cherokee Cher Common Zyyy Coptic Copt Qaac Cuneiform Xsux Cypriot Cprt Cyrillic Cyrl Deseret Dsrt Devanagari Deva Duployan Dupl Egyptian_Hieroglyphs Egyp Elbasan Elba Ethiopic Ethi Georgian Geor Glagolitic Glag Gothic Goth Grantha Gran Greek Grek Gujarati Gujr Gurmukhi Guru Han Hani Hangul Hang Hanunoo Hano Hatran Hatr Hebrew Hebr Hiragana Hira Imperial_Aramaic Armi Inherited Zinh Qaai Inscriptional_Pahlavi Phli Inscriptional_Parthian Prti Javanese Java Kaithi Kthi Kannada Knda Katakana Kana Kayah_Li Kali Kharoshthi Khar Khmer Khmr Khojki Khoj Khudawadi Sind Lao Laoo Latin Latn Lepcha Lepc Limbu Limb Linear_A Lina Linear_B Linb Lisu Lisu Lycian Lyci Lydian Lydi Mahajani Mahj Malayalam Mlym Mandaic Mand Manichaean Mani Marchen Marc Masaram_Gondi Gonm Meetei_Mayek Mtei Mende_Kikakui Mend Meroitic_Cursive Merc Meroitic_Hieroglyphs Mero Miao Plrd Modi Mongolian Mong Mro Mroo Multani Mult Myanmar Mymr Nabataean Nbat New_Tai_Lue Talu Newa Newa Nko Nkoo Nushu Nshu Ogham Ogam Ol_Chiki Olck Old_Hungarian Hung Old_Italic Ital Old_North_Arabian Narb Old_Permic Perm Old_Persian Xpeo Old_South_Arabian Sarb Old_Turkic Orkh Oriya Orya Osage Osge Osmanya Osma Pahawh_Hmong Hmng Palmyrene Palm Pau_Cin_Hau Pauc Phags_Pa Phag Phoenician Phnx Psalter_Pahlavi Phlp Rejang Rjng Runic Runr Samaritan Samr Saurashtra Saur Sharada Shrd Shavian Shaw Siddham Sidd SignWriting Sgnw Sinhala Sinh Sora_Sompeng Sora Soyombo Soyo Sundanese Sund Syloti_Nagri Sylo Syriac Syrc Tagalog Tglg Tagbanwa Tagb Tai_Le Tale Tai_Tham Lana Tai_Viet Tavt Takri Takr Tamil Taml Tangut Tang Telugu Telu Thaana Thaa Thai Thai Tibetan Tibt Tifinagh Tfng Tirhuta Tirh Ugaritic Ugar Vai Vaii Warang_Citi Wara Yi Yiii Zanabazar_Square Zanb"; var ecma10ScriptValues = ecma9ScriptValues + " Dogra Dogr Gunjala_Gondi Gong Hanifi_Rohingya Rohg Makasar Maka Medefaidrin Medf Old_Sogdian Sogo Sogdian Sogd"; var ecma11ScriptValues = ecma10ScriptValues + " Elymaic Elym Nandinagari Nand Nyiakeng_Puachue_Hmong Hmnp Wancho Wcho"; var ecma12ScriptValues = ecma11ScriptValues + " Chorasmian Chrs Diak Dives_Akuru Khitan_Small_Script Kits Yezi Yezidi"; var ecma13ScriptValues = ecma12ScriptValues + " Cypro_Minoan Cpmn Old_Uyghur Ougr Tangsa Tnsa Toto Vithkuqi Vith"; var ecma14ScriptValues = ecma13ScriptValues + " Hrkt Katakana_Or_Hiragana Kawi Nag_Mundari Nagm Unknown Zzzz"; var unicodeScriptValues = { 9: ecma9ScriptValues, 10: ecma10ScriptValues, 11: ecma11ScriptValues, 12: ecma12ScriptValues, 13: ecma13ScriptValues, 14: ecma14ScriptValues }; var data = {}; function buildUnicodeData(ecmaVersion) { var d = data[ecmaVersion] = { binary: wordsRegexp(unicodeBinaryProperties[ecmaVersion] + " " + unicodeGeneralCategoryValues), binaryOfStrings: wordsRegexp(unicodeBinaryPropertiesOfStrings[ecmaVersion]), nonBinary: { General_Category: wordsRegexp(unicodeGeneralCategoryValues), Script: wordsRegexp(unicodeScriptValues[ecmaVersion]) } }; d.nonBinary.Script_Extensions = d.nonBinary.Script; d.nonBinary.gc = d.nonBinary.General_Category; d.nonBinary.sc = d.nonBinary.Script; d.nonBinary.scx = d.nonBinary.Script_Extensions; } for (var i = 0, list = [9, 10, 11, 12, 13, 14]; i < list.length; i += 1) { var ecmaVersion = list[i]; 
buildUnicodeData(ecmaVersion); } var pp$1 = Parser.prototype; // Track disjunction structure to determine whether a duplicate // capture group name is allowed because it is in a separate branch. var BranchID = function BranchID(parent, base) { // Parent disjunction branch this.parent = parent; // Identifies this set of sibling branches this.base = base || this; }; BranchID.prototype.separatedFrom = function separatedFrom (alt) { // A branch is separate from another branch if they or any of // their parents are siblings in a given disjunction for (var self = this; self; self = self.parent) { for (var other = alt; other; other = other.parent) { if (self.base === other.base && self !== other) { return true } } } return false }; BranchID.prototype.sibling = function sibling () { return new BranchID(this.parent, this.base) }; var RegExpValidationState = function RegExpValidationState(parser) { this.parser = parser; this.validFlags = "gim" + (parser.options.ecmaVersion >= 6 ? "uy" : "") + (parser.options.ecmaVersion >= 9 ? "s" : "") + (parser.options.ecmaVersion >= 13 ? "d" : "") + (parser.options.ecmaVersion >= 15 ? "v" : ""); this.unicodeProperties = data[parser.options.ecmaVersion >= 14 ? 14 : parser.options.ecmaVersion]; this.source = ""; this.flags = ""; this.start = 0; this.switchU = false; this.switchV = false; this.switchN = false; this.pos = 0; this.lastIntValue = 0; this.lastStringValue = ""; this.lastAssertionIsQuantifiable = false; this.numCapturingParens = 0; this.maxBackReference = 0; this.groupNames = Object.create(null); this.backReferenceNames = []; this.branchID = null; }; RegExpValidationState.prototype.reset = function reset (start, pattern, flags) { var unicodeSets = flags.indexOf("v") !== -1; var unicode = flags.indexOf("u") !== -1; this.start = start | 0; this.source = pattern + ""; this.flags = flags; if (unicodeSets && this.parser.options.ecmaVersion >= 15) { this.switchU = true; this.switchV = true; this.switchN = true; } else { this.switchU = unicode && this.parser.options.ecmaVersion >= 6; this.switchV = false; this.switchN = unicode && this.parser.options.ecmaVersion >= 9; } }; RegExpValidationState.prototype.raise = function raise (message) { this.parser.raiseRecoverable(this.start, ("Invalid regular expression: /" + (this.source) + "/: " + message)); }; // If u flag is given, this returns the code point at the index (it combines a surrogate pair). // Otherwise, this returns the code unit of the index (can be a part of a surrogate pair). RegExpValidationState.prototype.at = function at (i, forceU) { if ( forceU === void 0 ) forceU = false; var s = this.source; var l = s.length; if (i >= l) { return -1 } var c = s.charCodeAt(i); if (!(forceU || this.switchU) || c <= 0xD7FF || c >= 0xE000 || i + 1 >= l) { return c } var next = s.charCodeAt(i + 1); return next >= 0xDC00 && next <= 0xDFFF ? 
(c << 10) + next - 0x35FDC00 : c }; RegExpValidationState.prototype.nextIndex = function nextIndex (i, forceU) { if ( forceU === void 0 ) forceU = false; var s = this.source; var l = s.length; if (i >= l) { return l } var c = s.charCodeAt(i), next; if (!(forceU || this.switchU) || c <= 0xD7FF || c >= 0xE000 || i + 1 >= l || (next = s.charCodeAt(i + 1)) < 0xDC00 || next > 0xDFFF) { return i + 1 } return i + 2 }; RegExpValidationState.prototype.current = function current (forceU) { if ( forceU === void 0 ) forceU = false; return this.at(this.pos, forceU) }; RegExpValidationState.prototype.lookahead = function lookahead (forceU) { if ( forceU === void 0 ) forceU = false; return this.at(this.nextIndex(this.pos, forceU), forceU) }; RegExpValidationState.prototype.advance = function advance (forceU) { if ( forceU === void 0 ) forceU = false; this.pos = this.nextIndex(this.pos, forceU); }; RegExpValidationState.prototype.eat = function eat (ch, forceU) { if ( forceU === void 0 ) forceU = false; if (this.current(forceU) === ch) { this.advance(forceU); return true } return false }; RegExpValidationState.prototype.eatChars = function eatChars (chs, forceU) { if ( forceU === void 0 ) forceU = false; var pos = this.pos; for (var i = 0, list = chs; i < list.length; i += 1) { var ch = list[i]; var current = this.at(pos, forceU); if (current === -1 || current !== ch) { return false } pos = this.nextIndex(pos, forceU); } this.pos = pos; return true }; /** * Validate the flags part of a given RegExpLiteral. * * @param {RegExpValidationState} state The state to validate RegExp. * @returns {void} */ pp$1.validateRegExpFlags = function(state) { var validFlags = state.validFlags; var flags = state.flags; var u = false; var v = false; for (var i = 0; i < flags.length; i++) { var flag = flags.charAt(i); if (validFlags.indexOf(flag) === -1) { this.raise(state.start, "Invalid regular expression flag"); } if (flags.indexOf(flag, i + 1) > -1) { this.raise(state.start, "Duplicate regular expression flag"); } if (flag === "u") { u = true; } if (flag === "v") { v = true; } } if (this.options.ecmaVersion >= 15 && u && v) { this.raise(state.start, "Invalid regular expression flag"); } }; function hasProp(obj) { for (var _ in obj) { return true } return false } /** * Validate the pattern part of a given RegExpLiteral. * * @param {RegExpValidationState} state The state to validate RegExp. * @returns {void} */ pp$1.validateRegExpPattern = function(state) { this.regexp_pattern(state); // The goal symbol for the parse is |Pattern[~U, ~N]|. If the result of // parsing contains a |GroupName|, reparse with the goal symbol // |Pattern[~U, +N]| and use this result instead. Throw a *SyntaxError* // exception if _P_ did not conform to the grammar, if any elements of _P_ // were not matched by the parse, or if any Early Error conditions exist. if (!state.switchN && this.options.ecmaVersion >= 9 && hasProp(state.groupNames)) { state.switchN = true; this.regexp_pattern(state); } }; // https://www.ecma-international.org/ecma-262/8.0/#prod-Pattern pp$1.regexp_pattern = function(state) { state.pos = 0; state.lastIntValue = 0; state.lastStringValue = ""; state.lastAssertionIsQuantifiable = false; state.numCapturingParens = 0; state.maxBackReference = 0; state.groupNames = Object.create(null); state.backReferenceNames.length = 0; state.branchID = null; this.regexp_disjunction(state); if (state.pos !== state.source.length) { // Make the same messages as V8. 
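// A rough illustration (hypothetical inputs, not from the source) of the
// errors produced by the checks just below; wording follows V8:
//   /(?:a))/u -> "Unmatched ')'"            (a stray ')' remains after the disjunction)
//   /a]/u     -> "Lone quantifier brackets" (a stray ']' or '}' remains)
//   /\2(a)/u  -> "Invalid escape"           (back reference 2 exceeds the one capturing group)
//   /\k<x>/u  -> "Invalid named capture referenced" (no group named 'x' exists)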
if (state.eat(0x29 /* ) */)) { state.raise("Unmatched ')'"); } if (state.eat(0x5D /* ] */) || state.eat(0x7D /* } */)) { state.raise("Lone quantifier brackets"); } } if (state.maxBackReference > state.numCapturingParens) { state.raise("Invalid escape"); } for (var i = 0, list = state.backReferenceNames; i < list.length; i += 1) { var name = list[i]; if (!state.groupNames[name]) { state.raise("Invalid named capture referenced"); } } }; // https://www.ecma-international.org/ecma-262/8.0/#prod-Disjunction pp$1.regexp_disjunction = function(state) { var trackDisjunction = this.options.ecmaVersion >= 16; if (trackDisjunction) { state.branchID = new BranchID(state.branchID, null); } this.regexp_alternative(state); while (state.eat(0x7C /* | */)) { if (trackDisjunction) { state.branchID = state.branchID.sibling(); } this.regexp_alternative(state); } if (trackDisjunction) { state.branchID = state.branchID.parent; } // Make the same message as V8. if (this.regexp_eatQuantifier(state, true)) { state.raise("Nothing to repeat"); } if (state.eat(0x7B /* { */)) { state.raise("Lone quantifier brackets"); } }; // https://www.ecma-international.org/ecma-262/8.0/#prod-Alternative pp$1.regexp_alternative = function(state) { while (state.pos < state.source.length && this.regexp_eatTerm(state)) {} }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-Term pp$1.regexp_eatTerm = function(state) { if (this.regexp_eatAssertion(state)) { // Handle `QuantifiableAssertion Quantifier` alternative. // `state.lastAssertionIsQuantifiable` is true if the last eaten Assertion // is a QuantifiableAssertion. if (state.lastAssertionIsQuantifiable && this.regexp_eatQuantifier(state)) { // Make the same message as V8. if (state.switchU) { state.raise("Invalid quantifier"); } } return true } if (state.switchU ? this.regexp_eatAtom(state) : this.regexp_eatExtendedAtom(state)) { this.regexp_eatQuantifier(state); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-Assertion pp$1.regexp_eatAssertion = function(state) { var start = state.pos; state.lastAssertionIsQuantifiable = false; // ^, $ if (state.eat(0x5E /* ^ */) || state.eat(0x24 /* $ */)) { return true } // \b \B if (state.eat(0x5C /* \ */)) { if (state.eat(0x42 /* B */) || state.eat(0x62 /* b */)) { return true } state.pos = start; } // Lookahead / Lookbehind if (state.eat(0x28 /* ( */) && state.eat(0x3F /* ? */)) { var lookbehind = false; if (this.options.ecmaVersion >= 9) { lookbehind = state.eat(0x3C /* < */); } if (state.eat(0x3D /* = */) || state.eat(0x21 /* ! */)) { this.regexp_disjunction(state); if (!state.eat(0x29 /* ) */)) { state.raise("Unterminated group"); } state.lastAssertionIsQuantifiable = !lookbehind; return true } } state.pos = start; return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-Quantifier pp$1.regexp_eatQuantifier = function(state, noError) { if ( noError === void 0 ) noError = false; if (this.regexp_eatQuantifierPrefix(state, noError)) { state.eat(0x3F /* ? */); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-QuantifierPrefix pp$1.regexp_eatQuantifierPrefix = function(state, noError) { return ( state.eat(0x2A /* * */) || state.eat(0x2B /* + */) || state.eat(0x3F /* ? 
*/) || this.regexp_eatBracedQuantifier(state, noError) ) }; pp$1.regexp_eatBracedQuantifier = function(state, noError) { var start = state.pos; if (state.eat(0x7B /* { */)) { var min = 0, max = -1; if (this.regexp_eatDecimalDigits(state)) { min = state.lastIntValue; if (state.eat(0x2C /* , */) && this.regexp_eatDecimalDigits(state)) { max = state.lastIntValue; } if (state.eat(0x7D /* } */)) { // SyntaxError in https://www.ecma-international.org/ecma-262/8.0/#sec-term if (max !== -1 && max < min && !noError) { state.raise("numbers out of order in {} quantifier"); } return true } } if (state.switchU && !noError) { state.raise("Incomplete quantifier"); } state.pos = start; } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-Atom pp$1.regexp_eatAtom = function(state) { return ( this.regexp_eatPatternCharacters(state) || state.eat(0x2E /* . */) || this.regexp_eatReverseSolidusAtomEscape(state) || this.regexp_eatCharacterClass(state) || this.regexp_eatUncapturingGroup(state) || this.regexp_eatCapturingGroup(state) ) }; pp$1.regexp_eatReverseSolidusAtomEscape = function(state) { var start = state.pos; if (state.eat(0x5C /* \ */)) { if (this.regexp_eatAtomEscape(state)) { return true } state.pos = start; } return false }; pp$1.regexp_eatUncapturingGroup = function(state) { var start = state.pos; if (state.eat(0x28 /* ( */)) { if (state.eat(0x3F /* ? */) && state.eat(0x3A /* : */)) { this.regexp_disjunction(state); if (state.eat(0x29 /* ) */)) { return true } state.raise("Unterminated group"); } state.pos = start; } return false }; pp$1.regexp_eatCapturingGroup = function(state) { if (state.eat(0x28 /* ( */)) { if (this.options.ecmaVersion >= 9) { this.regexp_groupSpecifier(state); } else if (state.current() === 0x3F /* ? */) { state.raise("Invalid group"); } this.regexp_disjunction(state); if (state.eat(0x29 /* ) */)) { state.numCapturingParens += 1; return true } state.raise("Unterminated group"); } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-ExtendedAtom pp$1.regexp_eatExtendedAtom = function(state) { return ( state.eat(0x2E /* . */) || this.regexp_eatReverseSolidusAtomEscape(state) || this.regexp_eatCharacterClass(state) || this.regexp_eatUncapturingGroup(state) || this.regexp_eatCapturingGroup(state) || this.regexp_eatInvalidBracedQuantifier(state) || this.regexp_eatExtendedPatternCharacter(state) ) }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-InvalidBracedQuantifier pp$1.regexp_eatInvalidBracedQuantifier = function(state) { if (this.regexp_eatBracedQuantifier(state, true)) { state.raise("Nothing to repeat"); } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-SyntaxCharacter pp$1.regexp_eatSyntaxCharacter = function(state) { var ch = state.current(); if (isSyntaxCharacter(ch)) { state.lastIntValue = ch; state.advance(); return true } return false }; function isSyntaxCharacter(ch) { return ( ch === 0x24 /* $ */ || ch >= 0x28 /* ( */ && ch <= 0x2B /* + */ || ch === 0x2E /* . */ || ch === 0x3F /* ? */ || ch >= 0x5B /* [ */ && ch <= 0x5E /* ^ */ || ch >= 0x7B /* { */ && ch <= 0x7D /* } */ ) } // https://www.ecma-international.org/ecma-262/8.0/#prod-PatternCharacter // But eat eager. 
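// Hedged note on "eat eager": the grammar treats each PatternCharacter as its
// own Atom, but the function below consumes a whole run of non-SyntaxCharacters
// in one call. For example, while validating /abc*/ the run "abc" is eaten here
// and the "*" is then picked up by regexp_eatQuantifier; for validation
// purposes this is equivalent to quantifying only the final "c".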
pp$1.regexp_eatPatternCharacters = function(state) { var start = state.pos; var ch = 0; while ((ch = state.current()) !== -1 && !isSyntaxCharacter(ch)) { state.advance(); } return state.pos !== start }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-ExtendedPatternCharacter pp$1.regexp_eatExtendedPatternCharacter = function(state) { var ch = state.current(); if ( ch !== -1 && ch !== 0x24 /* $ */ && !(ch >= 0x28 /* ( */ && ch <= 0x2B /* + */) && ch !== 0x2E /* . */ && ch !== 0x3F /* ? */ && ch !== 0x5B /* [ */ && ch !== 0x5E /* ^ */ && ch !== 0x7C /* | */ ) { state.advance(); return true } return false }; // GroupSpecifier :: // [empty] // `?` GroupName pp$1.regexp_groupSpecifier = function(state) { if (state.eat(0x3F /* ? */)) { if (!this.regexp_eatGroupName(state)) { state.raise("Invalid group"); } var trackDisjunction = this.options.ecmaVersion >= 16; var known = state.groupNames[state.lastStringValue]; if (known) { if (trackDisjunction) { for (var i = 0, list = known; i < list.length; i += 1) { var altID = list[i]; if (!altID.separatedFrom(state.branchID)) { state.raise("Duplicate capture group name"); } } } else { state.raise("Duplicate capture group name"); } } if (trackDisjunction) { (known || (state.groupNames[state.lastStringValue] = [])).push(state.branchID); } else { state.groupNames[state.lastStringValue] = true; } } }; // GroupName :: // `<` RegExpIdentifierName `>` // Note: this updates `state.lastStringValue` property with the eaten name. pp$1.regexp_eatGroupName = function(state) { state.lastStringValue = ""; if (state.eat(0x3C /* < */)) { if (this.regexp_eatRegExpIdentifierName(state) && state.eat(0x3E /* > */)) { return true } state.raise("Invalid capture group name"); } return false }; // RegExpIdentifierName :: // RegExpIdentifierStart // RegExpIdentifierName RegExpIdentifierPart // Note: this updates `state.lastStringValue` property with the eaten name. 
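// Informal example (hypothetical pattern) of how the name is accumulated: for
// /(?<\u0061b>x)/u the eaten group name is "ab" — the leading \u0061 escape is
// decoded by regexp_eatRegExpIdentifierStart below, so state.lastStringValue
// ends up holding the unescaped name that later keys state.groupNames.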
pp$1.regexp_eatRegExpIdentifierName = function(state) { state.lastStringValue = ""; if (this.regexp_eatRegExpIdentifierStart(state)) { state.lastStringValue += codePointToString(state.lastIntValue); while (this.regexp_eatRegExpIdentifierPart(state)) { state.lastStringValue += codePointToString(state.lastIntValue); } return true } return false }; // RegExpIdentifierStart :: // UnicodeIDStart // `$` // `_` // `\` RegExpUnicodeEscapeSequence[+U] pp$1.regexp_eatRegExpIdentifierStart = function(state) { var start = state.pos; var forceU = this.options.ecmaVersion >= 11; var ch = state.current(forceU); state.advance(forceU); if (ch === 0x5C /* \ */ && this.regexp_eatRegExpUnicodeEscapeSequence(state, forceU)) { ch = state.lastIntValue; } if (isRegExpIdentifierStart(ch)) { state.lastIntValue = ch; return true } state.pos = start; return false }; function isRegExpIdentifierStart(ch) { return isIdentifierStart(ch, true) || ch === 0x24 /* $ */ || ch === 0x5F /* _ */ } // RegExpIdentifierPart :: // UnicodeIDContinue // `$` // `_` // `\` RegExpUnicodeEscapeSequence[+U] // // pp$1.regexp_eatRegExpIdentifierPart = function(state) { var start = state.pos; var forceU = this.options.ecmaVersion >= 11; var ch = state.current(forceU); state.advance(forceU); if (ch === 0x5C /* \ */ && this.regexp_eatRegExpUnicodeEscapeSequence(state, forceU)) { ch = state.lastIntValue; } if (isRegExpIdentifierPart(ch)) { state.lastIntValue = ch; return true } state.pos = start; return false }; function isRegExpIdentifierPart(ch) { return isIdentifierChar(ch, true) || ch === 0x24 /* $ */ || ch === 0x5F /* _ */ || ch === 0x200C /* */ || ch === 0x200D /* */ } // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-AtomEscape pp$1.regexp_eatAtomEscape = function(state) { if ( this.regexp_eatBackReference(state) || this.regexp_eatCharacterClassEscape(state) || this.regexp_eatCharacterEscape(state) || (state.switchN && this.regexp_eatKGroupName(state)) ) { return true } if (state.switchU) { // Make the same message as V8. 
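// Hedged examples of the two messages raised just below for unrecognised
// escapes under the u flag: /\c/u (a bare \c with no control letter) reports
// "Invalid unicode escape", while something like /\q/u reports the generic
// "Invalid escape". Without the u flag no error is raised here and parsing
// falls back to the Annex B (ExtendedAtom) productions instead.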
if (state.current() === 0x63 /* c */) { state.raise("Invalid unicode escape"); } state.raise("Invalid escape"); } return false }; pp$1.regexp_eatBackReference = function(state) { var start = state.pos; if (this.regexp_eatDecimalEscape(state)) { var n = state.lastIntValue; if (state.switchU) { // For SyntaxError in https://www.ecma-international.org/ecma-262/8.0/#sec-atomescape if (n > state.maxBackReference) { state.maxBackReference = n; } return true } if (n <= state.numCapturingParens) { return true } state.pos = start; } return false }; pp$1.regexp_eatKGroupName = function(state) { if (state.eat(0x6B /* k */)) { if (this.regexp_eatGroupName(state)) { state.backReferenceNames.push(state.lastStringValue); return true } state.raise("Invalid named reference"); } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-CharacterEscape pp$1.regexp_eatCharacterEscape = function(state) { return ( this.regexp_eatControlEscape(state) || this.regexp_eatCControlLetter(state) || this.regexp_eatZero(state) || this.regexp_eatHexEscapeSequence(state) || this.regexp_eatRegExpUnicodeEscapeSequence(state, false) || (!state.switchU && this.regexp_eatLegacyOctalEscapeSequence(state)) || this.regexp_eatIdentityEscape(state) ) }; pp$1.regexp_eatCControlLetter = function(state) { var start = state.pos; if (state.eat(0x63 /* c */)) { if (this.regexp_eatControlLetter(state)) { return true } state.pos = start; } return false }; pp$1.regexp_eatZero = function(state) { if (state.current() === 0x30 /* 0 */ && !isDecimalDigit(state.lookahead())) { state.lastIntValue = 0; state.advance(); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-ControlEscape pp$1.regexp_eatControlEscape = function(state) { var ch = state.current(); if (ch === 0x74 /* t */) { state.lastIntValue = 0x09; /* \t */ state.advance(); return true } if (ch === 0x6E /* n */) { state.lastIntValue = 0x0A; /* \n */ state.advance(); return true } if (ch === 0x76 /* v */) { state.lastIntValue = 0x0B; /* \v */ state.advance(); return true } if (ch === 0x66 /* f */) { state.lastIntValue = 0x0C; /* \f */ state.advance(); return true } if (ch === 0x72 /* r */) { state.lastIntValue = 0x0D; /* \r */ state.advance(); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-ControlLetter pp$1.regexp_eatControlLetter = function(state) { var ch = state.current(); if (isControlLetter(ch)) { state.lastIntValue = ch % 0x20; state.advance(); return true } return false }; function isControlLetter(ch) { return ( (ch >= 0x41 /* A */ && ch <= 0x5A /* Z */) || (ch >= 0x61 /* a */ && ch <= 0x7A /* z */) ) } // https://www.ecma-international.org/ecma-262/8.0/#prod-RegExpUnicodeEscapeSequence pp$1.regexp_eatRegExpUnicodeEscapeSequence = function(state, forceU) { if ( forceU === void 0 ) forceU = false; var start = state.pos; var switchU = forceU || state.switchU; if (state.eat(0x75 /* u */)) { if (this.regexp_eatFixedHexDigits(state, 4)) { var lead = state.lastIntValue; if (switchU && lead >= 0xD800 && lead <= 0xDBFF) { var leadSurrogateEnd = state.pos; if (state.eat(0x5C /* \ */) && state.eat(0x75 /* u */) && this.regexp_eatFixedHexDigits(state, 4)) { var trail = state.lastIntValue; if (trail >= 0xDC00 && trail <= 0xDFFF) { state.lastIntValue = (lead - 0xD800) * 0x400 + (trail - 0xDC00) + 0x10000; return true } } state.pos = leadSurrogateEnd; state.lastIntValue = lead; } return true } if ( switchU && state.eat(0x7B /* { */) && this.regexp_eatHexDigits(state) && state.eat(0x7D /* } */) && 
isValidUnicode(state.lastIntValue) ) { return true } if (switchU) { state.raise("Invalid unicode escape"); } state.pos = start; } return false }; function isValidUnicode(ch) { return ch >= 0 && ch <= 0x10FFFF } // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-IdentityEscape pp$1.regexp_eatIdentityEscape = function(state) { if (state.switchU) { if (this.regexp_eatSyntaxCharacter(state)) { return true } if (state.eat(0x2F /* / */)) { state.lastIntValue = 0x2F; /* / */ return true } return false } var ch = state.current(); if (ch !== 0x63 /* c */ && (!state.switchN || ch !== 0x6B /* k */)) { state.lastIntValue = ch; state.advance(); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-DecimalEscape pp$1.regexp_eatDecimalEscape = function(state) { state.lastIntValue = 0; var ch = state.current(); if (ch >= 0x31 /* 1 */ && ch <= 0x39 /* 9 */) { do { state.lastIntValue = 10 * state.lastIntValue + (ch - 0x30 /* 0 */); state.advance(); } while ((ch = state.current()) >= 0x30 /* 0 */ && ch <= 0x39 /* 9 */) return true } return false }; // Return values used by character set parsing methods, needed to // forbid negation of sets that can match strings. var CharSetNone = 0; // Nothing parsed var CharSetOk = 1; // Construct parsed, cannot contain strings var CharSetString = 2; // Construct parsed, can contain strings // https://www.ecma-international.org/ecma-262/8.0/#prod-CharacterClassEscape pp$1.regexp_eatCharacterClassEscape = function(state) { var ch = state.current(); if (isCharacterClassEscape(ch)) { state.lastIntValue = -1; state.advance(); return CharSetOk } var negate = false; if ( state.switchU && this.options.ecmaVersion >= 9 && ((negate = ch === 0x50 /* P */) || ch === 0x70 /* p */) ) { state.lastIntValue = -1; state.advance(); var result; if ( state.eat(0x7B /* { */) && (result = this.regexp_eatUnicodePropertyValueExpression(state)) && state.eat(0x7D /* } */) ) { if (negate && result === CharSetString) { state.raise("Invalid property name"); } return result } state.raise("Invalid property name"); } return CharSetNone }; function isCharacterClassEscape(ch) { return ( ch === 0x64 /* d */ || ch === 0x44 /* D */ || ch === 0x73 /* s */ || ch === 0x53 /* S */ || ch === 0x77 /* w */ || ch === 0x57 /* W */ ) } // UnicodePropertyValueExpression :: // UnicodePropertyName `=` UnicodePropertyValue // LoneUnicodePropertyNameOrValue pp$1.regexp_eatUnicodePropertyValueExpression = function(state) { var start = state.pos; // UnicodePropertyName `=` UnicodePropertyValue if (this.regexp_eatUnicodePropertyName(state) && state.eat(0x3D /* = */)) { var name = state.lastStringValue; if (this.regexp_eatUnicodePropertyValue(state)) { var value = state.lastStringValue; this.regexp_validateUnicodePropertyNameAndValue(state, name, value); return CharSetOk } } state.pos = start; // LoneUnicodePropertyNameOrValue if (this.regexp_eatLoneUnicodePropertyNameOrValue(state)) { var nameOrValue = state.lastStringValue; return this.regexp_validateUnicodePropertyNameOrValue(state, nameOrValue) } return CharSetNone }; pp$1.regexp_validateUnicodePropertyNameAndValue = function(state, name, value) { if (!hasOwn(state.unicodeProperties.nonBinary, name)) { state.raise("Invalid property name"); } if (!state.unicodeProperties.nonBinary[name].test(value)) { state.raise("Invalid property value"); } }; pp$1.regexp_validateUnicodePropertyNameOrValue = function(state, nameOrValue) { if (state.unicodeProperties.binary.test(nameOrValue)) { return CharSetOk } if (state.switchV && 
state.unicodeProperties.binaryOfStrings.test(nameOrValue)) { return CharSetString } state.raise("Invalid property name"); }; // UnicodePropertyName :: // UnicodePropertyNameCharacters pp$1.regexp_eatUnicodePropertyName = function(state) { var ch = 0; state.lastStringValue = ""; while (isUnicodePropertyNameCharacter(ch = state.current())) { state.lastStringValue += codePointToString(ch); state.advance(); } return state.lastStringValue !== "" }; function isUnicodePropertyNameCharacter(ch) { return isControlLetter(ch) || ch === 0x5F /* _ */ } // UnicodePropertyValue :: // UnicodePropertyValueCharacters pp$1.regexp_eatUnicodePropertyValue = function(state) { var ch = 0; state.lastStringValue = ""; while (isUnicodePropertyValueCharacter(ch = state.current())) { state.lastStringValue += codePointToString(ch); state.advance(); } return state.lastStringValue !== "" }; function isUnicodePropertyValueCharacter(ch) { return isUnicodePropertyNameCharacter(ch) || isDecimalDigit(ch) } // LoneUnicodePropertyNameOrValue :: // UnicodePropertyValueCharacters pp$1.regexp_eatLoneUnicodePropertyNameOrValue = function(state) { return this.regexp_eatUnicodePropertyValue(state) }; // https://www.ecma-international.org/ecma-262/8.0/#prod-CharacterClass pp$1.regexp_eatCharacterClass = function(state) { if (state.eat(0x5B /* [ */)) { var negate = state.eat(0x5E /* ^ */); var result = this.regexp_classContents(state); if (!state.eat(0x5D /* ] */)) { state.raise("Unterminated character class"); } if (negate && result === CharSetString) { state.raise("Negated character class may contain strings"); } return true } return false }; // https://tc39.es/ecma262/#prod-ClassContents // https://www.ecma-international.org/ecma-262/8.0/#prod-ClassRanges pp$1.regexp_classContents = function(state) { if (state.current() === 0x5D /* ] */) { return CharSetOk } if (state.switchV) { return this.regexp_classSetExpression(state) } this.regexp_nonEmptyClassRanges(state); return CharSetOk }; // https://www.ecma-international.org/ecma-262/8.0/#prod-NonemptyClassRanges // https://www.ecma-international.org/ecma-262/8.0/#prod-NonemptyClassRangesNoDash pp$1.regexp_nonEmptyClassRanges = function(state) { while (this.regexp_eatClassAtom(state)) { var left = state.lastIntValue; if (state.eat(0x2D /* - */) && this.regexp_eatClassAtom(state)) { var right = state.lastIntValue; if (state.switchU && (left === -1 || right === -1)) { state.raise("Invalid character class"); } if (left !== -1 && right !== -1 && left > right) { state.raise("Range out of order in character class"); } } } }; // https://www.ecma-international.org/ecma-262/8.0/#prod-ClassAtom // https://www.ecma-international.org/ecma-262/8.0/#prod-ClassAtomNoDash pp$1.regexp_eatClassAtom = function(state) { var start = state.pos; if (state.eat(0x5C /* \ */)) { if (this.regexp_eatClassEscape(state)) { return true } if (state.switchU) { // Make the same message as V8. 
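// Hedged examples (hypothetical patterns) for the class-escape errors reported
// just below under the u flag: /[\c]/u and /[\1]/u hit "Invalid class escape"
// (a dangling \c or an octal-digit escape inside a class), while /[\q]/u hits
// the plain "Invalid escape".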
var ch$1 = state.current(); if (ch$1 === 0x63 /* c */ || isOctalDigit(ch$1)) { state.raise("Invalid class escape"); } state.raise("Invalid escape"); } state.pos = start; } var ch = state.current(); if (ch !== 0x5D /* ] */) { state.lastIntValue = ch; state.advance(); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-ClassEscape pp$1.regexp_eatClassEscape = function(state) { var start = state.pos; if (state.eat(0x62 /* b */)) { state.lastIntValue = 0x08; /* */ return true } if (state.switchU && state.eat(0x2D /* - */)) { state.lastIntValue = 0x2D; /* - */ return true } if (!state.switchU && state.eat(0x63 /* c */)) { if (this.regexp_eatClassControlLetter(state)) { return true } state.pos = start; } return ( this.regexp_eatCharacterClassEscape(state) || this.regexp_eatCharacterEscape(state) ) }; // https://tc39.es/ecma262/#prod-ClassSetExpression // https://tc39.es/ecma262/#prod-ClassUnion // https://tc39.es/ecma262/#prod-ClassIntersection // https://tc39.es/ecma262/#prod-ClassSubtraction pp$1.regexp_classSetExpression = function(state) { var result = CharSetOk, subResult; if (this.regexp_eatClassSetRange(state)) ; else if (subResult = this.regexp_eatClassSetOperand(state)) { if (subResult === CharSetString) { result = CharSetString; } // https://tc39.es/ecma262/#prod-ClassIntersection var start = state.pos; while (state.eatChars([0x26, 0x26] /* && */)) { if ( state.current() !== 0x26 /* & */ && (subResult = this.regexp_eatClassSetOperand(state)) ) { if (subResult !== CharSetString) { result = CharSetOk; } continue } state.raise("Invalid character in character class"); } if (start !== state.pos) { return result } // https://tc39.es/ecma262/#prod-ClassSubtraction while (state.eatChars([0x2D, 0x2D] /* -- */)) { if (this.regexp_eatClassSetOperand(state)) { continue } state.raise("Invalid character in character class"); } if (start !== state.pos) { return result } } else { state.raise("Invalid character in character class"); } // https://tc39.es/ecma262/#prod-ClassUnion for (;;) { if (this.regexp_eatClassSetRange(state)) { continue } subResult = this.regexp_eatClassSetOperand(state); if (!subResult) { return result } if (subResult === CharSetString) { result = CharSetString; } } }; // https://tc39.es/ecma262/#prod-ClassSetRange pp$1.regexp_eatClassSetRange = function(state) { var start = state.pos; if (this.regexp_eatClassSetCharacter(state)) { var left = state.lastIntValue; if (state.eat(0x2D /* - */) && this.regexp_eatClassSetCharacter(state)) { var right = state.lastIntValue; if (left !== -1 && right !== -1 && left > right) { state.raise("Range out of order in character class"); } return true } state.pos = start; } return false }; // https://tc39.es/ecma262/#prod-ClassSetOperand pp$1.regexp_eatClassSetOperand = function(state) { if (this.regexp_eatClassSetCharacter(state)) { return CharSetOk } return this.regexp_eatClassStringDisjunction(state) || this.regexp_eatNestedClass(state) }; // https://tc39.es/ecma262/#prod-NestedClass pp$1.regexp_eatNestedClass = function(state) { var start = state.pos; if (state.eat(0x5B /* [ */)) { var negate = state.eat(0x5E /* ^ */); var result = this.regexp_classContents(state); if (state.eat(0x5D /* ] */)) { if (negate && result === CharSetString) { state.raise("Negated character class may contain strings"); } return result } state.pos = start; } if (state.eat(0x5C /* \ */)) { var result$1 = this.regexp_eatCharacterClassEscape(state); if (result$1) { return result$1 } state.pos = start; } return null }; // 
https://tc39.es/ecma262/#prod-ClassStringDisjunction pp$1.regexp_eatClassStringDisjunction = function(state) { var start = state.pos; if (state.eatChars([0x5C, 0x71] /* \q */)) { if (state.eat(0x7B /* { */)) { var result = this.regexp_classStringDisjunctionContents(state); if (state.eat(0x7D /* } */)) { return result } } else { // Make the same message as V8. state.raise("Invalid escape"); } state.pos = start; } return null }; // https://tc39.es/ecma262/#prod-ClassStringDisjunctionContents pp$1.regexp_classStringDisjunctionContents = function(state) { var result = this.regexp_classString(state); while (state.eat(0x7C /* | */)) { if (this.regexp_classString(state) === CharSetString) { result = CharSetString; } } return result }; // https://tc39.es/ecma262/#prod-ClassString // https://tc39.es/ecma262/#prod-NonEmptyClassString pp$1.regexp_classString = function(state) { var count = 0; while (this.regexp_eatClassSetCharacter(state)) { count++; } return count === 1 ? CharSetOk : CharSetString }; // https://tc39.es/ecma262/#prod-ClassSetCharacter pp$1.regexp_eatClassSetCharacter = function(state) { var start = state.pos; if (state.eat(0x5C /* \ */)) { if ( this.regexp_eatCharacterEscape(state) || this.regexp_eatClassSetReservedPunctuator(state) ) { return true } if (state.eat(0x62 /* b */)) { state.lastIntValue = 0x08; /* */ return true } state.pos = start; return false } var ch = state.current(); if (ch < 0 || ch === state.lookahead() && isClassSetReservedDoublePunctuatorCharacter(ch)) { return false } if (isClassSetSyntaxCharacter(ch)) { return false } state.advance(); state.lastIntValue = ch; return true }; // https://tc39.es/ecma262/#prod-ClassSetReservedDoublePunctuator function isClassSetReservedDoublePunctuatorCharacter(ch) { return ( ch === 0x21 /* ! */ || ch >= 0x23 /* # */ && ch <= 0x26 /* & */ || ch >= 0x2A /* * */ && ch <= 0x2C /* , */ || ch === 0x2E /* . */ || ch >= 0x3A /* : */ && ch <= 0x40 /* @ */ || ch === 0x5E /* ^ */ || ch === 0x60 /* ` */ || ch === 0x7E /* ~ */ ) } // https://tc39.es/ecma262/#prod-ClassSetSyntaxCharacter function isClassSetSyntaxCharacter(ch) { return ( ch === 0x28 /* ( */ || ch === 0x29 /* ) */ || ch === 0x2D /* - */ || ch === 0x2F /* / */ || ch >= 0x5B /* [ */ && ch <= 0x5D /* ] */ || ch >= 0x7B /* { */ && ch <= 0x7D /* } */ ) } // https://tc39.es/ecma262/#prod-ClassSetReservedPunctuator pp$1.regexp_eatClassSetReservedPunctuator = function(state) { var ch = state.current(); if (isClassSetReservedPunctuator(ch)) { state.lastIntValue = ch; state.advance(); return true } return false }; // https://tc39.es/ecma262/#prod-ClassSetReservedPunctuator function isClassSetReservedPunctuator(ch) { return ( ch === 0x21 /* ! 
*/ || ch === 0x23 /* # */ || ch === 0x25 /* % */ || ch === 0x26 /* & */ || ch === 0x2C /* , */ || ch === 0x2D /* - */ || ch >= 0x3A /* : */ && ch <= 0x3E /* > */ || ch === 0x40 /* @ */ || ch === 0x60 /* ` */ || ch === 0x7E /* ~ */ ) } // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-ClassControlLetter pp$1.regexp_eatClassControlLetter = function(state) { var ch = state.current(); if (isDecimalDigit(ch) || ch === 0x5F /* _ */) { state.lastIntValue = ch % 0x20; state.advance(); return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-HexEscapeSequence pp$1.regexp_eatHexEscapeSequence = function(state) { var start = state.pos; if (state.eat(0x78 /* x */)) { if (this.regexp_eatFixedHexDigits(state, 2)) { return true } if (state.switchU) { state.raise("Invalid escape"); } state.pos = start; } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-DecimalDigits pp$1.regexp_eatDecimalDigits = function(state) { var start = state.pos; var ch = 0; state.lastIntValue = 0; while (isDecimalDigit(ch = state.current())) { state.lastIntValue = 10 * state.lastIntValue + (ch - 0x30 /* 0 */); state.advance(); } return state.pos !== start }; function isDecimalDigit(ch) { return ch >= 0x30 /* 0 */ && ch <= 0x39 /* 9 */ } // https://www.ecma-international.org/ecma-262/8.0/#prod-HexDigits pp$1.regexp_eatHexDigits = function(state) { var start = state.pos; var ch = 0; state.lastIntValue = 0; while (isHexDigit(ch = state.current())) { state.lastIntValue = 16 * state.lastIntValue + hexToInt(ch); state.advance(); } return state.pos !== start }; function isHexDigit(ch) { return ( (ch >= 0x30 /* 0 */ && ch <= 0x39 /* 9 */) || (ch >= 0x41 /* A */ && ch <= 0x46 /* F */) || (ch >= 0x61 /* a */ && ch <= 0x66 /* f */) ) } function hexToInt(ch) { if (ch >= 0x41 /* A */ && ch <= 0x46 /* F */) { return 10 + (ch - 0x41 /* A */) } if (ch >= 0x61 /* a */ && ch <= 0x66 /* f */) { return 10 + (ch - 0x61 /* a */) } return ch - 0x30 /* 0 */ } // https://www.ecma-international.org/ecma-262/8.0/#prod-annexB-LegacyOctalEscapeSequence // Allows only 0-377(octal) i.e. 0-255(decimal). pp$1.regexp_eatLegacyOctalEscapeSequence = function(state) { if (this.regexp_eatOctalDigit(state)) { var n1 = state.lastIntValue; if (this.regexp_eatOctalDigit(state)) { var n2 = state.lastIntValue; if (n1 <= 3 && this.regexp_eatOctalDigit(state)) { state.lastIntValue = n1 * 64 + n2 * 8 + state.lastIntValue; } else { state.lastIntValue = n1 * 8 + n2; } } else { state.lastIntValue = n1; } return true } return false }; // https://www.ecma-international.org/ecma-262/8.0/#prod-OctalDigit pp$1.regexp_eatOctalDigit = function(state) { var ch = state.current(); if (isOctalDigit(ch)) { state.lastIntValue = ch - 0x30; /* 0 */ state.advance(); return true } state.lastIntValue = 0; return false }; function isOctalDigit(ch) { return ch >= 0x30 /* 0 */ && ch <= 0x37 /* 7 */ } // https://www.ecma-international.org/ecma-262/8.0/#prod-Hex4Digits // https://www.ecma-international.org/ecma-262/8.0/#prod-HexDigit // And HexDigit HexDigit in https://www.ecma-international.org/ecma-262/8.0/#prod-HexEscapeSequence pp$1.regexp_eatFixedHexDigits = function(state, length) { var start = state.pos; state.lastIntValue = 0; for (var i = 0; i < length; ++i) { var ch = state.current(); if (!isHexDigit(ch)) { state.pos = start; return false } state.lastIntValue = 16 * state.lastIntValue + hexToInt(ch); state.advance(); } return true }; // Object type used to represent tokens. 
Note that normally, tokens // simply exist as properties on the parser object. This is only // used for the onToken callback and the external tokenizer. var Token = function Token(p) { this.type = p.type; this.value = p.value; this.start = p.start; this.end = p.end; if (p.options.locations) { this.loc = new SourceLocation(p, p.startLoc, p.endLoc); } if (p.options.ranges) { this.range = [p.start, p.end]; } }; // ## Tokenizer var pp = Parser.prototype; // Move to the next token pp.next = function(ignoreEscapeSequenceInKeyword) { if (!ignoreEscapeSequenceInKeyword && this.type.keyword && this.containsEsc) { this.raiseRecoverable(this.start, "Escape sequence in keyword " + this.type.keyword); } if (this.options.onToken) { this.options.onToken(new Token(this)); } this.lastTokEnd = this.end; this.lastTokStart = this.start; this.lastTokEndLoc = this.endLoc; this.lastTokStartLoc = this.startLoc; this.nextToken(); }; pp.getToken = function() { this.next(); return new Token(this) }; // If we're in an ES6 environment, make parsers iterable if (typeof Symbol !== "undefined") { pp[Symbol.iterator] = function() { var this$1$1 = this; return { next: function () { var token = this$1$1.getToken(); return { done: token.type === types$1.eof, value: token } } } }; } // Toggle strict mode. Re-reads the next number or string to please // pedantic tests (`"use strict"; 010;` should fail). // Read a single token, updating the parser object's token-related // properties. pp.nextToken = function() { var curContext = this.curContext(); if (!curContext || !curContext.preserveSpace) { this.skipSpace(); } this.start = this.pos; if (this.options.locations) { this.startLoc = this.curPosition(); } if (this.pos >= this.input.length) { return this.finishToken(types$1.eof) } if (curContext.override) { return curContext.override(this) } else { this.readToken(this.fullCharCodeAtPos()); } }; pp.readToken = function(code) { // Identifier or keyword. '\uXXXX' sequences are allowed in // identifiers, so '\' also dispatches to that. if (isIdentifierStart(code, this.options.ecmaVersion >= 6) || code === 92 /* '\' */) { return this.readWord() } return this.getTokenFromCode(code) }; pp.fullCharCodeAtPos = function() { var code = this.input.charCodeAt(this.pos); if (code <= 0xd7ff || code >= 0xdc00) { return code } var next = this.input.charCodeAt(this.pos + 1); return next <= 0xdbff || next >= 0xe000 ? code : (code << 10) + next - 0x35fdc00 }; pp.skipBlockComment = function() { var startLoc = this.options.onComment && this.curPosition(); var start = this.pos, end = this.input.indexOf("*/", this.pos += 2); if (end === -1) { this.raise(this.pos - 2, "Unterminated comment"); } this.pos = end + 2; if (this.options.locations) { for (var nextBreak = (void 0), pos = start; (nextBreak = nextLineBreak(this.input, pos, this.pos)) > -1;) { ++this.curLine; pos = this.lineStart = nextBreak; } } if (this.options.onComment) { this.options.onComment(true, this.input.slice(start + 2, end), start, this.pos, startLoc, this.curPosition()); } }; pp.skipLineComment = function(startSkip) { var start = this.pos; var startLoc = this.options.onComment && this.curPosition(); var ch = this.input.charCodeAt(this.pos += startSkip); while (this.pos < this.input.length && !isNewLine(ch)) { ch = this.input.charCodeAt(++this.pos); } if (this.options.onComment) { this.options.onComment(false, this.input.slice(start + startSkip, this.pos), start, this.pos, startLoc, this.curPosition()); } }; // Called at the start of the parse and after every token. 
Skips // whitespace and comments, and. pp.skipSpace = function() { loop: while (this.pos < this.input.length) { var ch = this.input.charCodeAt(this.pos); switch (ch) { case 32: case 160: // ' ' ++this.pos; break case 13: if (this.input.charCodeAt(this.pos + 1) === 10) { ++this.pos; } case 10: case 8232: case 8233: ++this.pos; if (this.options.locations) { ++this.curLine; this.lineStart = this.pos; } break case 47: // '/' switch (this.input.charCodeAt(this.pos + 1)) { case 42: // '*' this.skipBlockComment(); break case 47: this.skipLineComment(2); break default: break loop } break default: if (ch > 8 && ch < 14 || ch >= 5760 && nonASCIIwhitespace.test(String.fromCharCode(ch))) { ++this.pos; } else { break loop } } } }; // Called at the end of every token. Sets `end`, `val`, and // maintains `context` and `exprAllowed`, and skips the space after // the token, so that the next one's `start` will point at the // right position. pp.finishToken = function(type, val) { this.end = this.pos; if (this.options.locations) { this.endLoc = this.curPosition(); } var prevType = this.type; this.type = type; this.value = val; this.updateContext(prevType); }; // ### Token reading // This is the function that is called to fetch the next token. It // is somewhat obscure, because it works in character codes rather // than characters, and because operator parsing has been inlined // into it. // // All in the name of speed. // pp.readToken_dot = function() { var next = this.input.charCodeAt(this.pos + 1); if (next >= 48 && next <= 57) { return this.readNumber(true) } var next2 = this.input.charCodeAt(this.pos + 2); if (this.options.ecmaVersion >= 6 && next === 46 && next2 === 46) { // 46 = dot '.' this.pos += 3; return this.finishToken(types$1.ellipsis) } else { ++this.pos; return this.finishToken(types$1.dot) } }; pp.readToken_slash = function() { // '/' var next = this.input.charCodeAt(this.pos + 1); if (this.exprAllowed) { ++this.pos; return this.readRegexp() } if (next === 61) { return this.finishOp(types$1.assign, 2) } return this.finishOp(types$1.slash, 1) }; pp.readToken_mult_modulo_exp = function(code) { // '%*' var next = this.input.charCodeAt(this.pos + 1); var size = 1; var tokentype = code === 42 ? types$1.star : types$1.modulo; // exponentiation operator ** and **= if (this.options.ecmaVersion >= 7 && code === 42 && next === 42) { ++size; tokentype = types$1.starstar; next = this.input.charCodeAt(this.pos + 2); } if (next === 61) { return this.finishOp(types$1.assign, size + 1) } return this.finishOp(tokentype, size) }; pp.readToken_pipe_amp = function(code) { // '|&' var next = this.input.charCodeAt(this.pos + 1); if (next === code) { if (this.options.ecmaVersion >= 12) { var next2 = this.input.charCodeAt(this.pos + 2); if (next2 === 61) { return this.finishOp(types$1.assign, 3) } } return this.finishOp(code === 124 ? types$1.logicalOR : types$1.logicalAND, 2) } if (next === 61) { return this.finishOp(types$1.assign, 2) } return this.finishOp(code === 124 ? 
types$1.bitwiseOR : types$1.bitwiseAND, 1) }; pp.readToken_caret = function() { // '^' var next = this.input.charCodeAt(this.pos + 1); if (next === 61) { return this.finishOp(types$1.assign, 2) } return this.finishOp(types$1.bitwiseXOR, 1) }; pp.readToken_plus_min = function(code) { // '+-' var next = this.input.charCodeAt(this.pos + 1); if (next === code) { if (next === 45 && !this.inModule && this.input.charCodeAt(this.pos + 2) === 62 && (this.lastTokEnd === 0 || lineBreak.test(this.input.slice(this.lastTokEnd, this.pos)))) { // A `-->` line comment this.skipLineComment(3); this.skipSpace(); return this.nextToken() } return this.finishOp(types$1.incDec, 2) } if (next === 61) { return this.finishOp(types$1.assign, 2) } return this.finishOp(types$1.plusMin, 1) }; pp.readToken_lt_gt = function(code) { // '<>' var next = this.input.charCodeAt(this.pos + 1); var size = 1; if (next === code) { size = code === 62 && this.input.charCodeAt(this.pos + 2) === 62 ? 3 : 2; if (this.input.charCodeAt(this.pos + size) === 61) { return this.finishOp(types$1.assign, size + 1) } return this.finishOp(types$1.bitShift, size) } if (next === 33 && code === 60 && !this.inModule && this.input.charCodeAt(this.pos + 2) === 45 && this.input.charCodeAt(this.pos + 3) === 45) { // `