/*!

 diff v7.0.0

BSD 3-Clause License

Copyright (c) 2009-2015, Kevin Decker <kpdecker@gmail.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@license
*/
37(function (global, factory) {
38 typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
39 typeof define === 'function' && define.amd ? define(['exports'], factory) :
40 (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.Diff = {}));
41})(this, (function (exports) { 'use strict';
42
43 function Diff() {}
44 Diff.prototype = {
45 diff: function diff(oldString, newString) {
46 var _options$timeout;
47 var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
48 var callback = options.callback;
49 if (typeof options === 'function') {
50 callback = options;
51 options = {};
52 }
53 var self = this;
54 function done(value) {
55 value = self.postProcess(value, options);
56 if (callback) {
57 setTimeout(function () {
58 callback(value);
59 }, 0);
60 return true;
61 } else {
62 return value;
63 }
64 }
65
66 // Allow subclasses to massage the input prior to running
67 oldString = this.castInput(oldString, options);
68 newString = this.castInput(newString, options);
69 oldString = this.removeEmpty(this.tokenize(oldString, options));
70 newString = this.removeEmpty(this.tokenize(newString, options));
71 var newLen = newString.length,
72 oldLen = oldString.length;
73 var editLength = 1;
74 var maxEditLength = newLen + oldLen;
75 if (options.maxEditLength != null) {
76 maxEditLength = Math.min(maxEditLength, options.maxEditLength);
77 }
78 var maxExecutionTime = (_options$timeout = options.timeout) !== null && _options$timeout !== void 0 ? _options$timeout : Infinity;
79 var abortAfterTimestamp = Date.now() + maxExecutionTime;
80 var bestPath = [{
81 oldPos: -1,
82 lastComponent: undefined
83 }];
84
85 // Seed editLength = 0, i.e. the content starts with the same values
86 var newPos = this.extractCommon(bestPath[0], newString, oldString, 0, options);
87 if (bestPath[0].oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
// The inputs are identical, as far as the tokenizer and equality check are concerned
89 return done(buildValues(self, bestPath[0].lastComponent, newString, oldString, self.useLongestToken));
90 }
91
92 // Once we hit the right edge of the edit graph on some diagonal k, we can
93 // definitely reach the end of the edit graph in no more than k edits, so
94 // there's no point in considering any moves to diagonal k+1 any more (from
95 // which we're guaranteed to need at least k+1 more edits).
96 // Similarly, once we've reached the bottom of the edit graph, there's no
97 // point considering moves to lower diagonals.
98 // We record this fact by setting minDiagonalToConsider and
99 // maxDiagonalToConsider to some finite value once we've hit the edge of
100 // the edit graph.
101 // This optimization is not faithful to the original algorithm presented in
102 // Myers's paper, which instead pointlessly extends D-paths off the end of
103 // the edit graph - see page 7 of Myers's paper which notes this point
104 // explicitly and illustrates it with a diagram. This has major performance
105 // implications for some common scenarios. For instance, to compute a diff
106 // where the new text simply appends d characters on the end of the
107 // original text of length n, the true Myers algorithm will take O(n+d^2)
108 // time while this optimization needs only O(n+d) time.
109 var minDiagonalToConsider = -Infinity,
110 maxDiagonalToConsider = Infinity;
111
// Main worker method. Checks all permutations of a given edit length for acceptance.
113 function execEditLength() {
114 for (var diagonalPath = Math.max(minDiagonalToConsider, -editLength); diagonalPath <= Math.min(maxDiagonalToConsider, editLength); diagonalPath += 2) {
115 var basePath = void 0;
116 var removePath = bestPath[diagonalPath - 1],
117 addPath = bestPath[diagonalPath + 1];
118 if (removePath) {
119 // No one else is going to attempt to use this value, clear it
120 bestPath[diagonalPath - 1] = undefined;
121 }
122 var canAdd = false;
123 if (addPath) {
124 // what newPos will be after we do an insertion:
125 var addPathNewPos = addPath.oldPos - diagonalPath;
126 canAdd = addPath && 0 <= addPathNewPos && addPathNewPos < newLen;
127 }
128 var canRemove = removePath && removePath.oldPos + 1 < oldLen;
129 if (!canAdd && !canRemove) {
130 // If this path is a terminal then prune
131 bestPath[diagonalPath] = undefined;
132 continue;
133 }
134
135 // Select the diagonal that we want to branch from. We select the prior
136 // path whose position in the old string is the farthest from the origin
137 // and does not pass the bounds of the diff graph
138 if (!canRemove || canAdd && removePath.oldPos < addPath.oldPos) {
139 basePath = self.addToPath(addPath, true, false, 0, options);
140 } else {
141 basePath = self.addToPath(removePath, false, true, 1, options);
142 }
143 newPos = self.extractCommon(basePath, newString, oldString, diagonalPath, options);
144 if (basePath.oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
145 // If we have hit the end of both strings, then we are done
146 return done(buildValues(self, basePath.lastComponent, newString, oldString, self.useLongestToken));
147 } else {
148 bestPath[diagonalPath] = basePath;
149 if (basePath.oldPos + 1 >= oldLen) {
150 maxDiagonalToConsider = Math.min(maxDiagonalToConsider, diagonalPath - 1);
151 }
152 if (newPos + 1 >= newLen) {
153 minDiagonalToConsider = Math.max(minDiagonalToConsider, diagonalPath + 1);
154 }
155 }
156 }
157 editLength++;
158 }
159
// Performs the edit-length iteration. It's a bit fugly as it has to support
// both sync and async modes, which is never fun. Loops over execEditLength
// until a value is produced, or until the edit length exceeds
// options.maxEditLength (if given), in which case it will return undefined.
164 if (callback) {
165 (function exec() {
166 setTimeout(function () {
167 if (editLength > maxEditLength || Date.now() > abortAfterTimestamp) {
168 return callback();
169 }
170 if (!execEditLength()) {
171 exec();
172 }
173 }, 0);
174 })();
175 } else {
176 while (editLength <= maxEditLength && Date.now() <= abortAfterTimestamp) {
177 var ret = execEditLength();
178 if (ret) {
179 return ret;
180 }
181 }
182 }
183 },
184 addToPath: function addToPath(path, added, removed, oldPosInc, options) {
185 var last = path.lastComponent;
186 if (last && !options.oneChangePerToken && last.added === added && last.removed === removed) {
187 return {
188 oldPos: path.oldPos + oldPosInc,
189 lastComponent: {
190 count: last.count + 1,
191 added: added,
192 removed: removed,
193 previousComponent: last.previousComponent
194 }
195 };
196 } else {
197 return {
198 oldPos: path.oldPos + oldPosInc,
199 lastComponent: {
200 count: 1,
201 added: added,
202 removed: removed,
203 previousComponent: last
204 }
205 };
206 }
207 },
208 extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath, options) {
209 var newLen = newString.length,
210 oldLen = oldString.length,
211 oldPos = basePath.oldPos,
212 newPos = oldPos - diagonalPath,
213 commonCount = 0;
214 while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(oldString[oldPos + 1], newString[newPos + 1], options)) {
215 newPos++;
216 oldPos++;
217 commonCount++;
218 if (options.oneChangePerToken) {
219 basePath.lastComponent = {
220 count: 1,
221 previousComponent: basePath.lastComponent,
222 added: false,
223 removed: false
224 };
225 }
226 }
227 if (commonCount && !options.oneChangePerToken) {
228 basePath.lastComponent = {
229 count: commonCount,
230 previousComponent: basePath.lastComponent,
231 added: false,
232 removed: false
233 };
234 }
235 basePath.oldPos = oldPos;
236 return newPos;
237 },
238 equals: function equals(left, right, options) {
239 if (options.comparator) {
240 return options.comparator(left, right);
241 } else {
242 return left === right || options.ignoreCase && left.toLowerCase() === right.toLowerCase();
243 }
244 },
245 removeEmpty: function removeEmpty(array) {
246 var ret = [];
247 for (var i = 0; i < array.length; i++) {
248 if (array[i]) {
249 ret.push(array[i]);
250 }
251 }
252 return ret;
253 },
254 castInput: function castInput(value) {
255 return value;
256 },
257 tokenize: function tokenize(value) {
258 return Array.from(value);
259 },
260 join: function join(chars) {
261 return chars.join('');
262 },
263 postProcess: function postProcess(changeObjects) {
264 return changeObjects;
265 }
266 };
267 function buildValues(diff, lastComponent, newString, oldString, useLongestToken) {
268 // First we convert our linked list of components in reverse order to an
269 // array in the right order:
270 var components = [];
271 var nextComponent;
272 while (lastComponent) {
273 components.push(lastComponent);
274 nextComponent = lastComponent.previousComponent;
275 delete lastComponent.previousComponent;
276 lastComponent = nextComponent;
277 }
278 components.reverse();
279 var componentPos = 0,
280 componentLen = components.length,
281 newPos = 0,
282 oldPos = 0;
283 for (; componentPos < componentLen; componentPos++) {
284 var component = components[componentPos];
285 if (!component.removed) {
286 if (!component.added && useLongestToken) {
287 var value = newString.slice(newPos, newPos + component.count);
288 value = value.map(function (value, i) {
289 var oldValue = oldString[oldPos + i];
290 return oldValue.length > value.length ? oldValue : value;
291 });
292 component.value = diff.join(value);
293 } else {
294 component.value = diff.join(newString.slice(newPos, newPos + component.count));
295 }
296 newPos += component.count;
297
298 // Common case
299 if (!component.added) {
300 oldPos += component.count;
301 }
302 } else {
303 component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
304 oldPos += component.count;
305 }
306 }
307 return components;
308 }
309
310 var characterDiff = new Diff();
311 function diffChars(oldStr, newStr, options) {
312 return characterDiff.diff(oldStr, newStr, options);
313 }
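// Illustrative usage sketch (not part of the library source). diffChars is
// exported on the Diff namespace this UMD wrapper creates; each change object
// carries `value`, `count`, and optional `added`/`removed` flags:
//
//   var parts = Diff.diffChars('beep boop', 'beep boob blah');
//   parts.forEach(function (part) {
//     var marker = part.added ? '+' : part.removed ? '-' : ' ';
//     console.log(marker + JSON.stringify(part.value));
//   });
//
// Unchanged runs such as 'beep boo' come through as single change objects with
// neither flag set; the exact grouping of the changed pieces is decided by the
// Myers diff implemented above.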
314
315 function longestCommonPrefix(str1, str2) {
316 var i;
317 for (i = 0; i < str1.length && i < str2.length; i++) {
318 if (str1[i] != str2[i]) {
319 return str1.slice(0, i);
320 }
321 }
322 return str1.slice(0, i);
323 }
324 function longestCommonSuffix(str1, str2) {
325 var i;
326
327 // Unlike longestCommonPrefix, we need a special case to handle all scenarios
328 // where we return the empty string since str1.slice(-0) will return the
329 // entire string.
330 if (!str1 || !str2 || str1[str1.length - 1] != str2[str2.length - 1]) {
331 return '';
332 }
333 for (i = 0; i < str1.length && i < str2.length; i++) {
334 if (str1[str1.length - (i + 1)] != str2[str2.length - (i + 1)]) {
335 return str1.slice(-i);
336 }
337 }
338 return str1.slice(-i);
339 }
340 function replacePrefix(string, oldPrefix, newPrefix) {
341 if (string.slice(0, oldPrefix.length) != oldPrefix) {
342 throw Error("string ".concat(JSON.stringify(string), " doesn't start with prefix ").concat(JSON.stringify(oldPrefix), "; this is a bug"));
343 }
344 return newPrefix + string.slice(oldPrefix.length);
345 }
346 function replaceSuffix(string, oldSuffix, newSuffix) {
347 if (!oldSuffix) {
348 return string + newSuffix;
349 }
350 if (string.slice(-oldSuffix.length) != oldSuffix) {
351 throw Error("string ".concat(JSON.stringify(string), " doesn't end with suffix ").concat(JSON.stringify(oldSuffix), "; this is a bug"));
352 }
353 return string.slice(0, -oldSuffix.length) + newSuffix;
354 }
355 function removePrefix(string, oldPrefix) {
356 return replacePrefix(string, oldPrefix, '');
357 }
358 function removeSuffix(string, oldSuffix) {
359 return replaceSuffix(string, oldSuffix, '');
360 }
361 function maximumOverlap(string1, string2) {
362 return string2.slice(0, overlapCount(string1, string2));
363 }
364
365 // Nicked from https://stackoverflow.com/a/60422853/1709587
366 function overlapCount(a, b) {
367 // Deal with cases where the strings differ in length
368 var startA = 0;
369 if (a.length > b.length) {
370 startA = a.length - b.length;
371 }
372 var endB = b.length;
373 if (a.length < b.length) {
374 endB = a.length;
375 }
376 // Create a back-reference for each index
377 // that should be followed in case of a mismatch.
378 // We only need B to make these references:
379 var map = Array(endB);
380 var k = 0; // Index that lags behind j
381 map[0] = 0;
382 for (var j = 1; j < endB; j++) {
383 if (b[j] == b[k]) {
384 map[j] = map[k]; // skip over the same character (optional optimisation)
385 } else {
386 map[j] = k;
387 }
388 while (k > 0 && b[j] != b[k]) {
389 k = map[k];
390 }
391 if (b[j] == b[k]) {
392 k++;
393 }
394 }
395 // Phase 2: use these references while iterating over A
396 k = 0;
397 for (var i = startA; i < a.length; i++) {
398 while (k > 0 && a[i] != b[k]) {
399 k = map[k];
400 }
401 if (a[i] == b[k]) {
402 k++;
403 }
404 }
405 return k;
406 }
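// Worked example (illustrative, not part of the library source):
// overlapCount('abcd', 'cdef') returns 2 - the longest suffix of the first
// string that is also a prefix of the second is 'cd' - so
// maximumOverlap('abcd', 'cdef') returns 'cd'. The whitespace-deduping code
// further down uses this to find trailing/leading whitespace shared between
// adjacent change objects.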
407
408 /**
409 * Returns true if the string consistently uses Windows line endings.
410 */
411 function hasOnlyWinLineEndings(string) {
412 return string.includes('\r\n') && !string.startsWith('\n') && !string.match(/[^\r]\n/);
413 }
414
415 /**
416 * Returns true if the string consistently uses Unix line endings.
417 */
418 function hasOnlyUnixLineEndings(string) {
419 return !string.includes('\r\n') && string.includes('\n');
420 }
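// Illustrative examples (not part of the library source):
//
//   hasOnlyWinLineEndings('a\r\nb\r\n')   // true
//   hasOnlyWinLineEndings('a\r\nb\n')     // false - mixed endings
//   hasOnlyUnixLineEndings('a\nb\n')      // true
//   hasOnlyUnixLineEndings('a\r\nb\r\n')  // false
//
// applyPatch further down uses these to decide whether a patch's line endings
// should be converted before the patch is applied.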
421
422 // Based on https://en.wikipedia.org/wiki/Latin_script_in_Unicode
423 //
424 // Ranges and exceptions:
425 // Latin-1 Supplement, 0080–00FF
426 // - U+00D7 × Multiplication sign
427 // - U+00F7 ÷ Division sign
428 // Latin Extended-A, 0100–017F
429 // Latin Extended-B, 0180–024F
430 // IPA Extensions, 0250–02AF
431 // Spacing Modifier Letters, 02B0–02FF
432 // - U+02C7 ˇ &#711; Caron
433 // - U+02D8 ˘ &#728; Breve
434 // - U+02D9 ˙ &#729; Dot Above
435 // - U+02DA ˚ &#730; Ring Above
436 // - U+02DB ˛ &#731; Ogonek
437 // - U+02DC ˜ &#732; Small Tilde
438 // - U+02DD ˝ &#733; Double Acute Accent
439 // Latin Extended Additional, 1E00–1EFF
440 var extendedWordChars = "a-zA-Z0-9_\\u{C0}-\\u{FF}\\u{D8}-\\u{F6}\\u{F8}-\\u{2C6}\\u{2C8}-\\u{2D7}\\u{2DE}-\\u{2FF}\\u{1E00}-\\u{1EFF}";
441
442 // Each token is one of the following:
443 // - A punctuation mark plus the surrounding whitespace
444 // - A word plus the surrounding whitespace
// - Pure whitespace (but only in the special case where the entire text
//   is just whitespace)
447 //
448 // We have to include surrounding whitespace in the tokens because the two
449 // alternative approaches produce horribly broken results:
450 // * If we just discard the whitespace, we can't fully reproduce the original
451 // text from the sequence of tokens and any attempt to render the diff will
452 // get the whitespace wrong.
453 // * If we have separate tokens for whitespace, then in a typical text every
454 // second token will be a single space character. But this often results in
455 // the optimal diff between two texts being a perverse one that preserves
456 // the spaces between words but deletes and reinserts actual common words.
457 // See https://github.com/kpdecker/jsdiff/issues/160#issuecomment-1866099640
458 // for an example.
459 //
460 // Keeping the surrounding whitespace of course has implications for .equals
461 // and .join, not just .tokenize.
462
463 // This regex does NOT fully implement the tokenization rules described above.
464 // Instead, it gives runs of whitespace their own "token". The tokenize method
465 // then handles stitching whitespace tokens onto adjacent word or punctuation
466 // tokens.
467 var tokenizeIncludingWhitespace = new RegExp("[".concat(extendedWordChars, "]+|\\s+|[^").concat(extendedWordChars, "]"), 'ug');
468 var wordDiff = new Diff();
469 wordDiff.equals = function (left, right, options) {
470 if (options.ignoreCase) {
471 left = left.toLowerCase();
472 right = right.toLowerCase();
473 }
474 return left.trim() === right.trim();
475 };
476 wordDiff.tokenize = function (value) {
477 var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
478 var parts;
479 if (options.intlSegmenter) {
480 if (options.intlSegmenter.resolvedOptions().granularity != 'word') {
481 throw new Error('The segmenter passed must have a granularity of "word"');
482 }
483 parts = Array.from(options.intlSegmenter.segment(value), function (segment) {
484 return segment.segment;
485 });
486 } else {
487 parts = value.match(tokenizeIncludingWhitespace) || [];
488 }
489 var tokens = [];
490 var prevPart = null;
491 parts.forEach(function (part) {
492 if (/\s/.test(part)) {
493 if (prevPart == null) {
494 tokens.push(part);
495 } else {
496 tokens.push(tokens.pop() + part);
497 }
498 } else if (/\s/.test(prevPart)) {
499 if (tokens[tokens.length - 1] == prevPart) {
500 tokens.push(tokens.pop() + part);
501 } else {
502 tokens.push(prevPart + part);
503 }
504 } else {
505 tokens.push(part);
506 }
507 prevPart = part;
508 });
509 return tokens;
510 };
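// Illustrative example of the stitching above (not part of the library
// source): for 'foo bar!' the regex first yields ['foo', ' ', 'bar', '!'],
// and the loop then merges the whitespace into the neighbouring tokens,
// giving roughly ['foo ', ' bar', '!'] - each word token carries its
// surrounding whitespace, as described in the long comment further up.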
511 wordDiff.join = function (tokens) {
512 // Tokens being joined here will always have appeared consecutively in the
513 // same text, so we can simply strip off the leading whitespace from all the
514 // tokens except the first (and except any whitespace-only tokens - but such
515 // a token will always be the first and only token anyway) and then join them
516 // and the whitespace around words and punctuation will end up correct.
517 return tokens.map(function (token, i) {
518 if (i == 0) {
519 return token;
520 } else {
521 return token.replace(/^\s+/, '');
522 }
523 }).join('');
524 };
525 wordDiff.postProcess = function (changes, options) {
526 if (!changes || options.oneChangePerToken) {
527 return changes;
528 }
529 var lastKeep = null;
530 // Change objects representing any insertion or deletion since the last
531 // "keep" change object. There can be at most one of each.
532 var insertion = null;
533 var deletion = null;
534 changes.forEach(function (change) {
535 if (change.added) {
536 insertion = change;
537 } else if (change.removed) {
538 deletion = change;
539 } else {
540 if (insertion || deletion) {
541 // May be false at start of text
542 dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, change);
543 }
544 lastKeep = change;
545 insertion = null;
546 deletion = null;
547 }
548 });
549 if (insertion || deletion) {
550 dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, null);
551 }
552 return changes;
553 };
554 function diffWords(oldStr, newStr, options) {
555 // This option has never been documented and never will be (it's clearer to
556 // just call `diffWordsWithSpace` directly if you need that behavior), but
557 // has existed in jsdiff for a long time, so we retain support for it here
558 // for the sake of backwards compatibility.
559 if ((options === null || options === void 0 ? void 0 : options.ignoreWhitespace) != null && !options.ignoreWhitespace) {
560 return diffWordsWithSpace(oldStr, newStr, options);
561 }
562 return wordDiff.diff(oldStr, newStr, options);
563 }
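// Illustrative usage sketch (not part of the library source):
//
//   var changes = Diff.diffWords('beep boop', 'beep boob blah');
//   changes.forEach(function (change) {
//     if (change.added) { /* render as an insertion */ }
//     else if (change.removed) { /* render as a deletion */ }
//     else { /* unchanged text */ }
//   });
//
// Whitespace-only differences are not reported as changes (wordDiff.equals
// above trims tokens before comparing); use diffWordsWithSpace below when
// whitespace changes matter.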
564 function dedupeWhitespaceInChangeObjects(startKeep, deletion, insertion, endKeep) {
565 // Before returning, we tidy up the leading and trailing whitespace of the
566 // change objects to eliminate cases where trailing whitespace in one object
567 // is repeated as leading whitespace in the next.
568 // Below are examples of the outcomes we want here to explain the code.
569 // I=insert, K=keep, D=delete
570 // 1. diffing 'foo bar baz' vs 'foo baz'
571 // Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
572 // After cleanup, we want: K:'foo ' D:'bar ' K:'baz'
573 //
574 // 2. Diffing 'foo bar baz' vs 'foo qux baz'
575 // Prior to cleanup, we have K:'foo ' D:' bar ' I:' qux ' K:' baz'
576 // After cleanup, we want K:'foo ' D:'bar' I:'qux' K:' baz'
577 //
578 // 3. Diffing 'foo\nbar baz' vs 'foo baz'
579 // Prior to cleanup, we have K:'foo ' D:'\nbar ' K:' baz'
580 // After cleanup, we want K'foo' D:'\nbar' K:' baz'
581 //
582 // 4. Diffing 'foo baz' vs 'foo\nbar baz'
583 // Prior to cleanup, we have K:'foo\n' I:'\nbar ' K:' baz'
584 // After cleanup, we ideally want K'foo' I:'\nbar' K:' baz'
585 // but don't actually manage this currently (the pre-cleanup change
586 // objects don't contain enough information to make it possible).
587 //
588 // 5. Diffing 'foo bar baz' vs 'foo baz'
589 // Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
590 // After cleanup, we want K:'foo ' D:' bar ' K:'baz'
591 //
592 // Our handling is unavoidably imperfect in the case where there's a single
593 // indel between keeps and the whitespace has changed. For instance, consider
594 // diffing 'foo\tbar\nbaz' vs 'foo baz'. Unless we create an extra change
595 // object to represent the insertion of the space character (which isn't even
596 // a token), we have no way to avoid losing information about the texts'
597 // original whitespace in the result we return. Still, we do our best to
598 // output something that will look sensible if we e.g. print it with
599 // insertions in green and deletions in red.
600
601 // Between two "keep" change objects (or before the first or after the last
602 // change object), we can have either:
603 // * A "delete" followed by an "insert"
604 // * Just an "insert"
605 // * Just a "delete"
606 // We handle the three cases separately.
607 if (deletion && insertion) {
608 var oldWsPrefix = deletion.value.match(/^\s*/)[0];
609 var oldWsSuffix = deletion.value.match(/\s*$/)[0];
610 var newWsPrefix = insertion.value.match(/^\s*/)[0];
611 var newWsSuffix = insertion.value.match(/\s*$/)[0];
612 if (startKeep) {
613 var commonWsPrefix = longestCommonPrefix(oldWsPrefix, newWsPrefix);
614 startKeep.value = replaceSuffix(startKeep.value, newWsPrefix, commonWsPrefix);
615 deletion.value = removePrefix(deletion.value, commonWsPrefix);
616 insertion.value = removePrefix(insertion.value, commonWsPrefix);
617 }
618 if (endKeep) {
619 var commonWsSuffix = longestCommonSuffix(oldWsSuffix, newWsSuffix);
620 endKeep.value = replacePrefix(endKeep.value, newWsSuffix, commonWsSuffix);
621 deletion.value = removeSuffix(deletion.value, commonWsSuffix);
622 insertion.value = removeSuffix(insertion.value, commonWsSuffix);
623 }
624 } else if (insertion) {
625 // The whitespaces all reflect what was in the new text rather than
626 // the old, so we essentially have no information about whitespace
627 // insertion or deletion. We just want to dedupe the whitespace.
628 // We do that by having each change object keep its trailing
629 // whitespace and deleting duplicate leading whitespace where
630 // present.
631 if (startKeep) {
632 insertion.value = insertion.value.replace(/^\s*/, '');
633 }
634 if (endKeep) {
635 endKeep.value = endKeep.value.replace(/^\s*/, '');
636 }
637 // otherwise we've got a deletion and no insertion
638 } else if (startKeep && endKeep) {
639 var newWsFull = endKeep.value.match(/^\s*/)[0],
640 delWsStart = deletion.value.match(/^\s*/)[0],
641 delWsEnd = deletion.value.match(/\s*$/)[0];
642
643 // Any whitespace that comes straight after startKeep in both the old and
644 // new texts, assign to startKeep and remove from the deletion.
645 var newWsStart = longestCommonPrefix(newWsFull, delWsStart);
646 deletion.value = removePrefix(deletion.value, newWsStart);
647
648 // Any whitespace that comes straight before endKeep in both the old and
649 // new texts, and hasn't already been assigned to startKeep, assign to
650 // endKeep and remove from the deletion.
651 var newWsEnd = longestCommonSuffix(removePrefix(newWsFull, newWsStart), delWsEnd);
652 deletion.value = removeSuffix(deletion.value, newWsEnd);
653 endKeep.value = replacePrefix(endKeep.value, newWsFull, newWsEnd);
654
655 // If there's any whitespace from the new text that HASN'T already been
656 // assigned, assign it to the start:
657 startKeep.value = replaceSuffix(startKeep.value, newWsFull, newWsFull.slice(0, newWsFull.length - newWsEnd.length));
658 } else if (endKeep) {
659 // We are at the start of the text. Preserve all the whitespace on
660 // endKeep, and just remove whitespace from the end of deletion to the
661 // extent that it overlaps with the start of endKeep.
662 var endKeepWsPrefix = endKeep.value.match(/^\s*/)[0];
663 var deletionWsSuffix = deletion.value.match(/\s*$/)[0];
664 var overlap = maximumOverlap(deletionWsSuffix, endKeepWsPrefix);
665 deletion.value = removeSuffix(deletion.value, overlap);
666 } else if (startKeep) {
667 // We are at the END of the text. Preserve all the whitespace on
668 // startKeep, and just remove whitespace from the start of deletion to
669 // the extent that it overlaps with the end of startKeep.
670 var startKeepWsSuffix = startKeep.value.match(/\s*$/)[0];
671 var deletionWsPrefix = deletion.value.match(/^\s*/)[0];
672 var _overlap = maximumOverlap(startKeepWsSuffix, deletionWsPrefix);
673 deletion.value = removePrefix(deletion.value, _overlap);
674 }
675 }
676 var wordWithSpaceDiff = new Diff();
677 wordWithSpaceDiff.tokenize = function (value) {
678 // Slightly different to the tokenizeIncludingWhitespace regex used above in
// that this one treats each individual newline as a distinct token, rather
680 // than merging them into other surrounding whitespace. This was requested
681 // in https://github.com/kpdecker/jsdiff/issues/180 &
682 // https://github.com/kpdecker/jsdiff/issues/211
683 var regex = new RegExp("(\\r?\\n)|[".concat(extendedWordChars, "]+|[^\\S\\n\\r]+|[^").concat(extendedWordChars, "]"), 'ug');
684 return value.match(regex) || [];
685 };
686 function diffWordsWithSpace(oldStr, newStr, options) {
687 return wordWithSpaceDiff.diff(oldStr, newStr, options);
688 }
689
690 function generateOptions(options, defaults) {
691 if (typeof options === 'function') {
692 defaults.callback = options;
693 } else if (options) {
694 for (var name in options) {
695 /* istanbul ignore else */
696 if (options.hasOwnProperty(name)) {
697 defaults[name] = options[name];
698 }
699 }
700 }
701 return defaults;
702 }
703
704 var lineDiff = new Diff();
705 lineDiff.tokenize = function (value, options) {
706 if (options.stripTrailingCr) {
707 // remove one \r before \n to match GNU diff's --strip-trailing-cr behavior
708 value = value.replace(/\r\n/g, '\n');
709 }
710 var retLines = [],
711 linesAndNewlines = value.split(/(\n|\r\n)/);
712
713 // Ignore the final empty token that occurs if the string ends with a new line
714 if (!linesAndNewlines[linesAndNewlines.length - 1]) {
715 linesAndNewlines.pop();
716 }
717
718 // Merge the content and line separators into single tokens
719 for (var i = 0; i < linesAndNewlines.length; i++) {
720 var line = linesAndNewlines[i];
721 if (i % 2 && !options.newlineIsToken) {
722 retLines[retLines.length - 1] += line;
723 } else {
724 retLines.push(line);
725 }
726 }
727 return retLines;
728 };
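// Illustrative example (not part of the library source): with default options
// 'one\ntwo\n' tokenizes to ['one\n', 'two\n'], i.e. each line keeps its
// trailing newline. With {newlineIsToken: true} the separators become their
// own tokens instead: ['one', '\n', 'two', '\n'].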
729 lineDiff.equals = function (left, right, options) {
730 // If we're ignoring whitespace, we need to normalise lines by stripping
731 // whitespace before checking equality. (This has an annoying interaction
732 // with newlineIsToken that requires special handling: if newlines get their
733 // own token, then we DON'T want to trim the *newline* tokens down to empty
734 // strings, since this would cause us to treat whitespace-only line content
735 // as equal to a separator between lines, which would be weird and
736 // inconsistent with the documented behavior of the options.)
737 if (options.ignoreWhitespace) {
738 if (!options.newlineIsToken || !left.includes('\n')) {
739 left = left.trim();
740 }
741 if (!options.newlineIsToken || !right.includes('\n')) {
742 right = right.trim();
743 }
744 } else if (options.ignoreNewlineAtEof && !options.newlineIsToken) {
745 if (left.endsWith('\n')) {
746 left = left.slice(0, -1);
747 }
748 if (right.endsWith('\n')) {
749 right = right.slice(0, -1);
750 }
751 }
752 return Diff.prototype.equals.call(this, left, right, options);
753 };
754 function diffLines(oldStr, newStr, callback) {
755 return lineDiff.diff(oldStr, newStr, callback);
756 }
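// Illustrative usage sketch (not part of the library source):
//
//   var changes = Diff.diffLines('one\ntwo\nthree\n', 'one\ntwo\nTHREE\n');
//   // => roughly: an unchanged change object covering 'one\ntwo\n', then a
//   //    removed 'three\n' and an added 'THREE\n' (the exact grouping is
//   //    decided by the Myers diff above).
//
// The third argument may be an options object ({ignoreWhitespace: true},
// {newlineIsToken: true}, ...) or, for async use, a callback.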
757
758 // Kept for backwards compatibility. This is a rather arbitrary wrapper method
759 // that just calls `diffLines` with `ignoreWhitespace: true`. It's confusing to
760 // have two ways to do exactly the same thing in the API, so we no longer
761 // document this one (library users should explicitly use `diffLines` with
762 // `ignoreWhitespace: true` instead) but we keep it around to maintain
763 // compatibility with code that used old versions.
764 function diffTrimmedLines(oldStr, newStr, callback) {
765 var options = generateOptions(callback, {
766 ignoreWhitespace: true
767 });
768 return lineDiff.diff(oldStr, newStr, options);
769 }
770
771 var sentenceDiff = new Diff();
772 sentenceDiff.tokenize = function (value) {
773 return value.split(/(\S.+?[.!?])(?=\s+|$)/);
774 };
775 function diffSentences(oldStr, newStr, callback) {
776 return sentenceDiff.diff(oldStr, newStr, callback);
777 }
778
779 var cssDiff = new Diff();
780 cssDiff.tokenize = function (value) {
781 return value.split(/([{}:;,]|\s+)/);
782 };
783 function diffCss(oldStr, newStr, callback) {
784 return cssDiff.diff(oldStr, newStr, callback);
785 }
786
787 function ownKeys(e, r) {
788 var t = Object.keys(e);
789 if (Object.getOwnPropertySymbols) {
790 var o = Object.getOwnPropertySymbols(e);
791 r && (o = o.filter(function (r) {
792 return Object.getOwnPropertyDescriptor(e, r).enumerable;
793 })), t.push.apply(t, o);
794 }
795 return t;
796 }
797 function _objectSpread2(e) {
798 for (var r = 1; r < arguments.length; r++) {
799 var t = null != arguments[r] ? arguments[r] : {};
800 r % 2 ? ownKeys(Object(t), !0).forEach(function (r) {
801 _defineProperty(e, r, t[r]);
802 }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) {
803 Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r));
804 });
805 }
806 return e;
807 }
808 function _toPrimitive(t, r) {
809 if ("object" != typeof t || !t) return t;
810 var e = t[Symbol.toPrimitive];
811 if (void 0 !== e) {
812 var i = e.call(t, r || "default");
813 if ("object" != typeof i) return i;
814 throw new TypeError("@@toPrimitive must return a primitive value.");
815 }
816 return ("string" === r ? String : Number)(t);
817 }
818 function _toPropertyKey(t) {
819 var i = _toPrimitive(t, "string");
820 return "symbol" == typeof i ? i : i + "";
821 }
822 function _typeof(o) {
823 "@babel/helpers - typeof";
824
825 return _typeof = "function" == typeof Symbol && "symbol" == typeof Symbol.iterator ? function (o) {
826 return typeof o;
827 } : function (o) {
828 return o && "function" == typeof Symbol && o.constructor === Symbol && o !== Symbol.prototype ? "symbol" : typeof o;
829 }, _typeof(o);
830 }
831 function _defineProperty(obj, key, value) {
832 key = _toPropertyKey(key);
833 if (key in obj) {
834 Object.defineProperty(obj, key, {
835 value: value,
836 enumerable: true,
837 configurable: true,
838 writable: true
839 });
840 } else {
841 obj[key] = value;
842 }
843 return obj;
844 }
845 function _toConsumableArray(arr) {
846 return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
847 }
848 function _arrayWithoutHoles(arr) {
849 if (Array.isArray(arr)) return _arrayLikeToArray(arr);
850 }
851 function _iterableToArray(iter) {
852 if (typeof Symbol !== "undefined" && iter[Symbol.iterator] != null || iter["@@iterator"] != null) return Array.from(iter);
853 }
854 function _unsupportedIterableToArray(o, minLen) {
855 if (!o) return;
856 if (typeof o === "string") return _arrayLikeToArray(o, minLen);
857 var n = Object.prototype.toString.call(o).slice(8, -1);
858 if (n === "Object" && o.constructor) n = o.constructor.name;
859 if (n === "Map" || n === "Set") return Array.from(o);
860 if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
861 }
862 function _arrayLikeToArray(arr, len) {
863 if (len == null || len > arr.length) len = arr.length;
864 for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
865 return arr2;
866 }
867 function _nonIterableSpread() {
868 throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
869 }
870
871 var jsonDiff = new Diff();
872 // Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
873 // dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
874 jsonDiff.useLongestToken = true;
875 jsonDiff.tokenize = lineDiff.tokenize;
876 jsonDiff.castInput = function (value, options) {
877 var undefinedReplacement = options.undefinedReplacement,
878 _options$stringifyRep = options.stringifyReplacer,
879 stringifyReplacer = _options$stringifyRep === void 0 ? function (k, v) {
880 return typeof v === 'undefined' ? undefinedReplacement : v;
881 } : _options$stringifyRep;
882 return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, ' ');
883 };
884 jsonDiff.equals = function (left, right, options) {
885 return Diff.prototype.equals.call(jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'), options);
886 };
887 function diffJson(oldObj, newObj, options) {
888 return jsonDiff.diff(oldObj, newObj, options);
889 }
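// Illustrative usage sketch (not part of the library source). Non-string
// inputs are serialized via canonicalize/JSON.stringify (see castInput above)
// before being diffed line by line:
//
//   var changes = Diff.diffJson({a: 1, b: 2}, {a: 1, b: 3});
//   // => change objects over the pretty-printed JSON text, with the
//   //    '"b": 2' line reported as removed and the '"b": 3' line as added.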
890
891 // This function handles the presence of circular references by bailing out when encountering an
892 // object that is already on the "stack" of items being processed. Accepts an optional replacer
893 function canonicalize(obj, stack, replacementStack, replacer, key) {
894 stack = stack || [];
895 replacementStack = replacementStack || [];
896 if (replacer) {
897 obj = replacer(key, obj);
898 }
899 var i;
900 for (i = 0; i < stack.length; i += 1) {
901 if (stack[i] === obj) {
902 return replacementStack[i];
903 }
904 }
905 var canonicalizedObj;
906 if ('[object Array]' === Object.prototype.toString.call(obj)) {
907 stack.push(obj);
908 canonicalizedObj = new Array(obj.length);
909 replacementStack.push(canonicalizedObj);
910 for (i = 0; i < obj.length; i += 1) {
911 canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
912 }
913 stack.pop();
914 replacementStack.pop();
915 return canonicalizedObj;
916 }
917 if (obj && obj.toJSON) {
918 obj = obj.toJSON();
919 }
920 if (_typeof(obj) === 'object' && obj !== null) {
921 stack.push(obj);
922 canonicalizedObj = {};
923 replacementStack.push(canonicalizedObj);
924 var sortedKeys = [],
925 _key;
926 for (_key in obj) {
927 /* istanbul ignore else */
928 if (Object.prototype.hasOwnProperty.call(obj, _key)) {
929 sortedKeys.push(_key);
930 }
931 }
932 sortedKeys.sort();
933 for (i = 0; i < sortedKeys.length; i += 1) {
934 _key = sortedKeys[i];
935 canonicalizedObj[_key] = canonicalize(obj[_key], stack, replacementStack, replacer, _key);
936 }
937 stack.pop();
938 replacementStack.pop();
939 } else {
940 canonicalizedObj = obj;
941 }
942 return canonicalizedObj;
943 }
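// Illustrative notes (not part of the library source): canonicalize sorts
// object keys, so canonicalize({b: 2, a: 1}) serializes the same as
// canonicalize({a: 1, b: 2}) and key order never shows up as a difference.
// When a circular reference is encountered, the already-built replacement for
// that object is reused instead of recursing forever.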
944
945 var arrayDiff = new Diff();
946 arrayDiff.tokenize = function (value) {
947 return value.slice();
948 };
949 arrayDiff.join = arrayDiff.removeEmpty = function (value) {
950 return value;
951 };
952 function diffArrays(oldArr, newArr, callback) {
953 return arrayDiff.diff(oldArr, newArr, callback);
954 }
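// Illustrative usage sketch (not part of the library source):
//
//   var changes = Diff.diffArrays([1, 2, 3], [1, 4, 3]);
//   // => roughly: [{count: 1, value: [1]},
//   //              {count: 1, removed: true, value: [2]},
//   //              {count: 1, added: true, value: [4]},
//   //              {count: 1, value: [3]}]
//
// An options.comparator function can be supplied to compare items by
// something other than === (see Diff.prototype.equals above).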
955
956 function unixToWin(patch) {
957 if (Array.isArray(patch)) {
958 return patch.map(unixToWin);
959 }
960 return _objectSpread2(_objectSpread2({}, patch), {}, {
961 hunks: patch.hunks.map(function (hunk) {
962 return _objectSpread2(_objectSpread2({}, hunk), {}, {
963 lines: hunk.lines.map(function (line, i) {
964 var _hunk$lines;
965 return line.startsWith('\\') || line.endsWith('\r') || (_hunk$lines = hunk.lines[i + 1]) !== null && _hunk$lines !== void 0 && _hunk$lines.startsWith('\\') ? line : line + '\r';
966 })
967 });
968 })
969 });
970 }
971 function winToUnix(patch) {
972 if (Array.isArray(patch)) {
973 return patch.map(winToUnix);
974 }
975 return _objectSpread2(_objectSpread2({}, patch), {}, {
976 hunks: patch.hunks.map(function (hunk) {
977 return _objectSpread2(_objectSpread2({}, hunk), {}, {
978 lines: hunk.lines.map(function (line) {
979 return line.endsWith('\r') ? line.substring(0, line.length - 1) : line;
980 })
981 });
982 })
983 });
984 }
985
986 /**
987 * Returns true if the patch consistently uses Unix line endings (or only involves one line and has
988 * no line endings).
989 */
990 function isUnix(patch) {
991 if (!Array.isArray(patch)) {
992 patch = [patch];
993 }
994 return !patch.some(function (index) {
995 return index.hunks.some(function (hunk) {
996 return hunk.lines.some(function (line) {
997 return !line.startsWith('\\') && line.endsWith('\r');
998 });
999 });
1000 });
1001 }
1002
1003 /**
1004 * Returns true if the patch uses Windows line endings and only Windows line endings.
1005 */
1006 function isWin(patch) {
1007 if (!Array.isArray(patch)) {
1008 patch = [patch];
1009 }
1010 return patch.some(function (index) {
1011 return index.hunks.some(function (hunk) {
1012 return hunk.lines.some(function (line) {
1013 return line.endsWith('\r');
1014 });
1015 });
1016 }) && patch.every(function (index) {
1017 return index.hunks.every(function (hunk) {
1018 return hunk.lines.every(function (line, i) {
1019 var _hunk$lines2;
1020 return line.startsWith('\\') || line.endsWith('\r') || ((_hunk$lines2 = hunk.lines[i + 1]) === null || _hunk$lines2 === void 0 ? void 0 : _hunk$lines2.startsWith('\\'));
1021 });
1022 });
1023 });
1024 }
1025
1026 function parsePatch(uniDiff) {
1027 var diffstr = uniDiff.split(/\n/),
1028 list = [],
1029 i = 0;
1030 function parseIndex() {
1031 var index = {};
1032 list.push(index);
1033
1034 // Parse diff metadata
1035 while (i < diffstr.length) {
1036 var line = diffstr[i];
1037
1038 // File header found, end parsing diff metadata
1039 if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
1040 break;
1041 }
1042
1043 // Diff index
1044 var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
1045 if (header) {
1046 index.index = header[1];
1047 }
1048 i++;
1049 }
1050
// Parse the file headers if they are defined. Unified diff requires them, but
// there's no technical reason an isolated hunk can't appear without a file header.
1053 parseFileHeader(index);
1054 parseFileHeader(index);
1055
1056 // Parse hunks
1057 index.hunks = [];
1058 while (i < diffstr.length) {
1059 var _line = diffstr[i];
1060 if (/^(Index:\s|diff\s|\-\-\-\s|\+\+\+\s|===================================================================)/.test(_line)) {
1061 break;
1062 } else if (/^@@/.test(_line)) {
1063 index.hunks.push(parseHunk());
1064 } else if (_line) {
1065 throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
1066 } else {
1067 i++;
1068 }
1069 }
1070 }
1071
// Parses the --- and +++ headers. If none are found, no lines
// are consumed.
1074 function parseFileHeader(index) {
1075 var fileHeader = /^(---|\+\+\+)\s+(.*)\r?$/.exec(diffstr[i]);
1076 if (fileHeader) {
1077 var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
1078 var data = fileHeader[2].split('\t', 2);
1079 var fileName = data[0].replace(/\\\\/g, '\\');
1080 if (/^".*"$/.test(fileName)) {
1081 fileName = fileName.substr(1, fileName.length - 2);
1082 }
1083 index[keyPrefix + 'FileName'] = fileName;
1084 index[keyPrefix + 'Header'] = (data[1] || '').trim();
1085 i++;
1086 }
1087 }
1088
1089 // Parses a hunk
1090 // This assumes that we are at the start of a hunk.
1091 function parseHunk() {
1092 var chunkHeaderIndex = i,
1093 chunkHeaderLine = diffstr[i++],
1094 chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
1095 var hunk = {
1096 oldStart: +chunkHeader[1],
1097 oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
1098 newStart: +chunkHeader[3],
1099 newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
1100 lines: []
1101 };
1102
1103 // Unified Diff Format quirk: If the chunk size is 0,
1104 // the first number is one lower than one would expect.
1105 // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
1106 if (hunk.oldLines === 0) {
1107 hunk.oldStart += 1;
1108 }
1109 if (hunk.newLines === 0) {
1110 hunk.newStart += 1;
1111 }
1112 var addCount = 0,
1113 removeCount = 0;
1114 for (; i < diffstr.length && (removeCount < hunk.oldLines || addCount < hunk.newLines || (_diffstr$i = diffstr[i]) !== null && _diffstr$i !== void 0 && _diffstr$i.startsWith('\\')); i++) {
1115 var _diffstr$i;
1116 var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
1117 if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
1118 hunk.lines.push(diffstr[i]);
1119 if (operation === '+') {
1120 addCount++;
1121 } else if (operation === '-') {
1122 removeCount++;
1123 } else if (operation === ' ') {
1124 addCount++;
1125 removeCount++;
1126 }
1127 } else {
1128 throw new Error("Hunk at line ".concat(chunkHeaderIndex + 1, " contained invalid line ").concat(diffstr[i]));
1129 }
1130 }
1131
1132 // Handle the empty block count case
1133 if (!addCount && hunk.newLines === 1) {
1134 hunk.newLines = 0;
1135 }
1136 if (!removeCount && hunk.oldLines === 1) {
1137 hunk.oldLines = 0;
1138 }
1139
1140 // Perform sanity checking
1141 if (addCount !== hunk.newLines) {
1142 throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
1143 }
1144 if (removeCount !== hunk.oldLines) {
1145 throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
1146 }
1147 return hunk;
1148 }
1149 while (i < diffstr.length) {
1150 parseIndex();
1151 }
1152 return list;
1153 }
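// Illustrative usage sketch (not part of the library source):
//
//   var patches = Diff.parsePatch(
//     '--- a.txt\n' +
//     '+++ a.txt\n' +
//     '@@ -1,2 +1,2 @@\n' +
//     ' unchanged\n' +
//     '-old line\n' +
//     '+new line\n');
//   // => [{oldFileName: 'a.txt', newFileName: 'a.txt',
//   //      oldHeader: '', newHeader: '',
//   //      hunks: [{oldStart: 1, oldLines: 2, newStart: 1, newLines: 2,
//   //               lines: [' unchanged', '-old line', '+new line']}]}]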
1154
1155 // Iterator that traverses in the range of [min, max], stepping
1156 // by distance from a given start position. I.e. for [0, 4], with
1157 // start of 2, this will iterate 2, 3, 1, 4, 0.
1158 function distanceIterator (start, minLine, maxLine) {
1159 var wantForward = true,
1160 backwardExhausted = false,
1161 forwardExhausted = false,
1162 localOffset = 1;
1163 return function iterator() {
1164 if (wantForward && !forwardExhausted) {
1165 if (backwardExhausted) {
1166 localOffset++;
1167 } else {
1168 wantForward = false;
1169 }
1170
1171 // Check if trying to fit beyond text length, and if not, check it fits
1172 // after offset location (or desired location on first iteration)
1173 if (start + localOffset <= maxLine) {
1174 return start + localOffset;
1175 }
1176 forwardExhausted = true;
1177 }
1178 if (!backwardExhausted) {
1179 if (!forwardExhausted) {
1180 wantForward = true;
1181 }
1182
1183 // Check if trying to fit before text beginning, and if not, check it fits
1184 // before offset location
1185 if (minLine <= start - localOffset) {
1186 return start - localOffset++;
1187 }
1188 backwardExhausted = true;
1189 return iterator();
1190 }
1191
// We tried to fit the hunk both before the text beginning and beyond the text
// length, so the hunk can't fit in the text. Return undefined.
1194 };
1195 }
1196
1197 function applyPatch(source, uniDiff) {
1198 var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
1199 if (typeof uniDiff === 'string') {
1200 uniDiff = parsePatch(uniDiff);
1201 }
1202 if (Array.isArray(uniDiff)) {
1203 if (uniDiff.length > 1) {
1204 throw new Error('applyPatch only works with a single input.');
1205 }
1206 uniDiff = uniDiff[0];
1207 }
1208 if (options.autoConvertLineEndings || options.autoConvertLineEndings == null) {
1209 if (hasOnlyWinLineEndings(source) && isUnix(uniDiff)) {
1210 uniDiff = unixToWin(uniDiff);
1211 } else if (hasOnlyUnixLineEndings(source) && isWin(uniDiff)) {
1212 uniDiff = winToUnix(uniDiff);
1213 }
1214 }
1215
1216 // Apply the diff to the input
1217 var lines = source.split('\n'),
1218 hunks = uniDiff.hunks,
1219 compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
1220 return line === patchContent;
1221 },
1222 fuzzFactor = options.fuzzFactor || 0,
1223 minLine = 0;
1224 if (fuzzFactor < 0 || !Number.isInteger(fuzzFactor)) {
1225 throw new Error('fuzzFactor must be a non-negative integer');
1226 }
1227
1228 // Special case for empty patch.
1229 if (!hunks.length) {
1230 return source;
1231 }
1232
1233 // Before anything else, handle EOFNL insertion/removal. If the patch tells us to make a change
1234 // to the EOFNL that is redundant/impossible - i.e. to remove a newline that's not there, or add a
1235 // newline that already exists - then we either return false and fail to apply the patch (if
1236 // fuzzFactor is 0) or simply ignore the problem and do nothing (if fuzzFactor is >0).
1237 // If we do need to remove/add a newline at EOF, this will always be in the final hunk:
1238 var prevLine = '',
1239 removeEOFNL = false,
1240 addEOFNL = false;
1241 for (var i = 0; i < hunks[hunks.length - 1].lines.length; i++) {
1242 var line = hunks[hunks.length - 1].lines[i];
1243 if (line[0] == '\\') {
1244 if (prevLine[0] == '+') {
1245 removeEOFNL = true;
1246 } else if (prevLine[0] == '-') {
1247 addEOFNL = true;
1248 }
1249 }
1250 prevLine = line;
1251 }
1252 if (removeEOFNL) {
1253 if (addEOFNL) {
1254 // This means the final line gets changed but doesn't have a trailing newline in either the
1255 // original or patched version. In that case, we do nothing if fuzzFactor > 0, and if
1256 // fuzzFactor is 0, we simply validate that the source file has no trailing newline.
1257 if (!fuzzFactor && lines[lines.length - 1] == '') {
1258 return false;
1259 }
1260 } else if (lines[lines.length - 1] == '') {
1261 lines.pop();
1262 } else if (!fuzzFactor) {
1263 return false;
1264 }
1265 } else if (addEOFNL) {
1266 if (lines[lines.length - 1] != '') {
1267 lines.push('');
1268 } else if (!fuzzFactor) {
1269 return false;
1270 }
1271 }
1272
1273 /**
1274 * Checks if the hunk can be made to fit at the provided location with at most `maxErrors`
1275 * insertions, substitutions, or deletions, while ensuring also that:
1276 * - lines deleted in the hunk match exactly, and
1277 * - wherever an insertion operation or block of insertion operations appears in the hunk, the
1278 * immediately preceding and following lines of context match exactly
1279 *
1280 * `toPos` should be set such that lines[toPos] is meant to match hunkLines[0].
1281 *
1282 * If the hunk can be applied, returns an object with properties `oldLineLastI` and
 * `patchedLines`. Otherwise, returns null.
1284 */
1285 function applyHunk(hunkLines, toPos, maxErrors) {
1286 var hunkLinesI = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : 0;
1287 var lastContextLineMatched = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : true;
1288 var patchedLines = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : [];
1289 var patchedLinesLength = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 0;
1290 var nConsecutiveOldContextLines = 0;
1291 var nextContextLineMustMatch = false;
1292 for (; hunkLinesI < hunkLines.length; hunkLinesI++) {
1293 var hunkLine = hunkLines[hunkLinesI],
1294 operation = hunkLine.length > 0 ? hunkLine[0] : ' ',
1295 content = hunkLine.length > 0 ? hunkLine.substr(1) : hunkLine;
1296 if (operation === '-') {
1297 if (compareLine(toPos + 1, lines[toPos], operation, content)) {
1298 toPos++;
1299 nConsecutiveOldContextLines = 0;
1300 } else {
1301 if (!maxErrors || lines[toPos] == null) {
1302 return null;
1303 }
1304 patchedLines[patchedLinesLength] = lines[toPos];
1305 return applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1);
1306 }
1307 }
1308 if (operation === '+') {
1309 if (!lastContextLineMatched) {
1310 return null;
1311 }
1312 patchedLines[patchedLinesLength] = content;
1313 patchedLinesLength++;
1314 nConsecutiveOldContextLines = 0;
1315 nextContextLineMustMatch = true;
1316 }
1317 if (operation === ' ') {
1318 nConsecutiveOldContextLines++;
1319 patchedLines[patchedLinesLength] = lines[toPos];
1320 if (compareLine(toPos + 1, lines[toPos], operation, content)) {
1321 patchedLinesLength++;
1322 lastContextLineMatched = true;
1323 nextContextLineMustMatch = false;
1324 toPos++;
1325 } else {
1326 if (nextContextLineMustMatch || !maxErrors) {
1327 return null;
1328 }
1329
1330 // Consider 3 possibilities in sequence:
1331 // 1. lines contains a *substitution* not included in the patch context, or
1332 // 2. lines contains an *insertion* not included in the patch context, or
1333 // 3. lines contains a *deletion* not included in the patch context
1334 // The first two options are of course only possible if the line from lines is non-null -
1335 // i.e. only option 3 is possible if we've overrun the end of the old file.
1336 return lines[toPos] && (applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength + 1) || applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1)) || applyHunk(hunkLines, toPos, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength);
1337 }
1338 }
1339 }
1340
1341 // Before returning, trim any unmodified context lines off the end of patchedLines and reduce
1342 // toPos (and thus oldLineLastI) accordingly. This allows later hunks to be applied to a region
1343 // that starts in this hunk's trailing context.
1344 patchedLinesLength -= nConsecutiveOldContextLines;
1345 toPos -= nConsecutiveOldContextLines;
1346 patchedLines.length = patchedLinesLength;
1347 return {
1348 patchedLines: patchedLines,
1349 oldLineLastI: toPos - 1
1350 };
1351 }
1352 var resultLines = [];
1353
1354 // Search best fit offsets for each hunk based on the previous ones
1355 var prevHunkOffset = 0;
1356 for (var _i = 0; _i < hunks.length; _i++) {
1357 var hunk = hunks[_i];
1358 var hunkResult = void 0;
1359 var maxLine = lines.length - hunk.oldLines + fuzzFactor;
1360 var toPos = void 0;
1361 for (var maxErrors = 0; maxErrors <= fuzzFactor; maxErrors++) {
1362 toPos = hunk.oldStart + prevHunkOffset - 1;
1363 var iterator = distanceIterator(toPos, minLine, maxLine);
1364 for (; toPos !== undefined; toPos = iterator()) {
1365 hunkResult = applyHunk(hunk.lines, toPos, maxErrors);
1366 if (hunkResult) {
1367 break;
1368 }
1369 }
1370 if (hunkResult) {
1371 break;
1372 }
1373 }
1374 if (!hunkResult) {
1375 return false;
1376 }
1377
1378 // Copy everything from the end of where we applied the last hunk to the start of this hunk
1379 for (var _i2 = minLine; _i2 < toPos; _i2++) {
1380 resultLines.push(lines[_i2]);
1381 }
1382
1383 // Add the lines produced by applying the hunk:
1384 for (var _i3 = 0; _i3 < hunkResult.patchedLines.length; _i3++) {
1385 var _line = hunkResult.patchedLines[_i3];
1386 resultLines.push(_line);
1387 }
1388
1389 // Set lower text limit to end of the current hunk, so next ones don't try
1390 // to fit over already patched text
1391 minLine = hunkResult.oldLineLastI + 1;
1392
1393 // Note the offset between where the patch said the hunk should've applied and where we
1394 // applied it, so we can adjust future hunks accordingly:
1395 prevHunkOffset = toPos + 1 - hunk.oldStart;
1396 }
1397
1398 // Copy over the rest of the lines from the old text
1399 for (var _i4 = minLine; _i4 < lines.length; _i4++) {
1400 resultLines.push(lines[_i4]);
1401 }
1402 return resultLines.join('\n');
1403 }
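// Illustrative usage sketch (not part of the library source). sourceText and
// patchString below are placeholders: the original text, and a unified diff
// string (e.g. from createPatch below) or a patch object from
// parsePatch/structuredPatch:
//
//   var patched = Diff.applyPatch(sourceText, patchString);
//   // => the patched text on success, or false if a hunk could not be
//   //    placed (for example because its context no longer matches).
//
// Passing {fuzzFactor: 2} tolerates up to two mismatched, missing or extra
// context lines per hunk, at the cost of a more permissive match.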
1404
1405 // Wrapper that supports multiple file patches via callbacks.
1406 function applyPatches(uniDiff, options) {
1407 if (typeof uniDiff === 'string') {
1408 uniDiff = parsePatch(uniDiff);
1409 }
1410 var currentIndex = 0;
1411 function processIndex() {
1412 var index = uniDiff[currentIndex++];
1413 if (!index) {
1414 return options.complete();
1415 }
1416 options.loadFile(index, function (err, data) {
1417 if (err) {
1418 return options.complete(err);
1419 }
1420 var updatedContent = applyPatch(data, index, options);
1421 options.patched(index, updatedContent, function (err) {
1422 if (err) {
1423 return options.complete(err);
1424 }
1425 processIndex();
1426 });
1427 });
1428 }
1429 processIndex();
1430 }
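// Illustrative usage sketch (not part of the library source). The caller
// supplies the loadFile/patched/complete callbacks that this wrapper drives;
// multiFilePatch below is a placeholder for a unified diff covering one or
// more files (a string or the result of parsePatch):
//
//   Diff.applyPatches(multiFilePatch, {
//     loadFile: function (index, callback) {
//       // look up the original content for index.oldFileName, then:
//       callback(null, originalContent);
//     },
//     patched: function (index, content, callback) {
//       // content is the patched text, or false if the hunks did not apply
//       callback();
//     },
//     complete: function (err) { /* all files processed, or err was raised */ }
//   });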
1431
1432 function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
1433 if (!options) {
1434 options = {};
1435 }
1436 if (typeof options === 'function') {
1437 options = {
1438 callback: options
1439 };
1440 }
1441 if (typeof options.context === 'undefined') {
1442 options.context = 4;
1443 }
1444 if (options.newlineIsToken) {
1445 throw new Error('newlineIsToken may not be used with patch-generation functions, only with diffing functions');
1446 }
1447 if (!options.callback) {
1448 return diffLinesResultToPatch(diffLines(oldStr, newStr, options));
1449 } else {
1450 var _options = options,
1451 _callback = _options.callback;
1452 diffLines(oldStr, newStr, _objectSpread2(_objectSpread2({}, options), {}, {
1453 callback: function callback(diff) {
1454 var patch = diffLinesResultToPatch(diff);
1455 _callback(patch);
1456 }
1457 }));
1458 }
1459 function diffLinesResultToPatch(diff) {
1460 // STEP 1: Build up the patch with no "\ No newline at end of file" lines and with the arrays
1461 // of lines containing trailing newline characters. We'll tidy up later...
1462
1463 if (!diff) {
1464 return;
1465 }
1466 diff.push({
1467 value: '',
1468 lines: []
1469 }); // Append an empty value to make cleanup easier
1470
1471 function contextLines(lines) {
1472 return lines.map(function (entry) {
1473 return ' ' + entry;
1474 });
1475 }
1476 var hunks = [];
1477 var oldRangeStart = 0,
1478 newRangeStart = 0,
1479 curRange = [],
1480 oldLine = 1,
1481 newLine = 1;
1482 var _loop = function _loop() {
1483 var current = diff[i],
1484 lines = current.lines || splitLines(current.value);
1485 current.lines = lines;
1486 if (current.added || current.removed) {
1487 var _curRange;
1488 // If we have previous context, start with that
1489 if (!oldRangeStart) {
1490 var prev = diff[i - 1];
1491 oldRangeStart = oldLine;
1492 newRangeStart = newLine;
1493 if (prev) {
1494 curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
1495 oldRangeStart -= curRange.length;
1496 newRangeStart -= curRange.length;
1497 }
1498 }
1499
1500 // Output our changes
1501 (_curRange = curRange).push.apply(_curRange, _toConsumableArray(lines.map(function (entry) {
1502 return (current.added ? '+' : '-') + entry;
1503 })));
1504
1505 // Track the updated file position
1506 if (current.added) {
1507 newLine += lines.length;
1508 } else {
1509 oldLine += lines.length;
1510 }
1511 } else {
1512 // Identical context lines. Track line changes
1513 if (oldRangeStart) {
1514 // Close out any changes that have been output (or join overlapping)
1515 if (lines.length <= options.context * 2 && i < diff.length - 2) {
1516 var _curRange2;
1517 // Overlapping
1518 (_curRange2 = curRange).push.apply(_curRange2, _toConsumableArray(contextLines(lines)));
1519 } else {
1520 var _curRange3;
1521 // end the range and output
1522 var contextSize = Math.min(lines.length, options.context);
1523 (_curRange3 = curRange).push.apply(_curRange3, _toConsumableArray(contextLines(lines.slice(0, contextSize))));
1524 var _hunk = {
1525 oldStart: oldRangeStart,
1526 oldLines: oldLine - oldRangeStart + contextSize,
1527 newStart: newRangeStart,
1528 newLines: newLine - newRangeStart + contextSize,
1529 lines: curRange
1530 };
1531 hunks.push(_hunk);
1532 oldRangeStart = 0;
1533 newRangeStart = 0;
1534 curRange = [];
1535 }
1536 }
1537 oldLine += lines.length;
1538 newLine += lines.length;
1539 }
1540 };
1541 for (var i = 0; i < diff.length; i++) {
1542 _loop();
1543 }
1544
1545 // Step 2: eliminate the trailing `\n` from each line of each hunk, and, where needed, add
1546 // "\ No newline at end of file".
1547 for (var _i = 0, _hunks = hunks; _i < _hunks.length; _i++) {
1548 var hunk = _hunks[_i];
1549 for (var _i2 = 0; _i2 < hunk.lines.length; _i2++) {
1550 if (hunk.lines[_i2].endsWith('\n')) {
1551 hunk.lines[_i2] = hunk.lines[_i2].slice(0, -1);
1552 } else {
1553 hunk.lines.splice(_i2 + 1, 0, '\\ No newline at end of file');
1554 _i2++; // Skip the line we just added, then continue iterating
1555 }
1556 }
1557 }
1558 return {
1559 oldFileName: oldFileName,
1560 newFileName: newFileName,
1561 oldHeader: oldHeader,
1562 newHeader: newHeader,
1563 hunks: hunks
1564 };
1565 }
1566 }
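// Sketch of the object structuredPatch returns, with the shape taken from the return
// statement above; the concrete values are what a call like this would be expected to
// produce, not captured output:
//
//   var patch = structuredPatch('a.txt', 'a.txt', 'old\n', 'new\n');
//   // patch => {
//   //   oldFileName: 'a.txt', newFileName: 'a.txt',
//   //   oldHeader: undefined, newHeader: undefined,
//   //   hunks: [{ oldStart: 1, oldLines: 1, newStart: 1, newLines: 1,
//   //             lines: ['-old', '+new'] }]
//   // }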
1567 function formatPatch(diff) {
1568 if (Array.isArray(diff)) {
1569 return diff.map(formatPatch).join('\n');
1570 }
1571 var ret = [];
1572 if (diff.oldFileName == diff.newFileName) {
1573 ret.push('Index: ' + diff.oldFileName);
1574 }
1575 ret.push('===================================================================');
1576 ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
1577 ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
1578 for (var i = 0; i < diff.hunks.length; i++) {
1579 var hunk = diff.hunks[i];
1580 // Unified Diff Format quirk: If the chunk size is 0,
1581 // the first number is one lower than one would expect.
1582 // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
1583 if (hunk.oldLines === 0) {
1584 hunk.oldStart -= 1;
1585 }
1586 if (hunk.newLines === 0) {
1587 hunk.newStart -= 1;
1588 }
1589 ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
1590 ret.push.apply(ret, hunk.lines);
1591 }
1592 return ret.join('\n') + '\n';
1593 }
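// For the patch object sketched above, formatPatch would be expected to emit a standard
// unified diff along these lines (exact text depends on the inputs):
//
//   Index: a.txt
//   ===================================================================
//   --- a.txt
//   +++ a.txt
//   @@ -1,1 +1,1 @@
//   -old
//   +new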
1594 function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
1595 var _options2;
1596 if (typeof options === 'function') {
1597 options = {
1598 callback: options
1599 };
1600 }
1601 if (!((_options2 = options) !== null && _options2 !== void 0 && _options2.callback)) {
1602 var patchObj = structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options);
1603 if (!patchObj) {
1604 return;
1605 }
1606 return formatPatch(patchObj);
1607 } else {
1608 var _options3 = options,
1609 _callback2 = _options3.callback;
1610 structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, _objectSpread2(_objectSpread2({}, options), {}, {
1611 callback: function callback(patchObj) {
1612 if (!patchObj) {
1613 _callback2();
1614 } else {
1615 _callback2(formatPatch(patchObj));
1616 }
1617 }
1618 }));
1619 }
1620 }
1621 function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
1622 return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
1623 }
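// Usage sketch (not part of the bundle): createPatch is the single-file convenience wrapper
// around createTwoFilesPatch, and both return the formatted text produced by formatPatch
// above. The file name and strings below are illustrative.
//
//   var patchText = createPatch('greeting.txt', 'hello\n', 'hello world\n');
//   // patchText is a unified-diff string that parsePatch/applyPatch can consume.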
1624
1625 /**
1626 * Split `text` into an array of lines, including the trailing newline character (where present)
1627 */
1628 function splitLines(text) {
1629 var hasTrailingNl = text.endsWith('\n');
1630 var result = text.split('\n').map(function (line) {
1631 return line + '\n';
1632 });
1633 if (hasTrailingNl) {
1634 result.pop();
1635 } else {
1636 result.push(result.pop().slice(0, -1));
1637 }
1638 return result;
1639 }
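// For example, traced from the implementation above:
//   splitLines('a\nb\n') => ['a\n', 'b\n']
//   splitLines('a\nb')   => ['a\n', 'b']   (the last entry keeps no trailing newline)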
1640
1641 function arrayEqual(a, b) {
1642 if (a.length !== b.length) {
1643 return false;
1644 }
1645 return arrayStartsWith(a, b);
1646 }
1647 function arrayStartsWith(array, start) {
1648 if (start.length > array.length) {
1649 return false;
1650 }
1651 for (var i = 0; i < start.length; i++) {
1652 if (start[i] !== array[i]) {
1653 return false;
1654 }
1655 }
1656 return true;
1657 }
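// For example, arrayStartsWith(['-a', '-b', ' c'], ['-a', '-b']) => true, while arrayEqual
// additionally requires the two arrays to have the same length.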
1658
1659 function calcLineCount(hunk) {
1660 var _calcOldNewLineCount = calcOldNewLineCount(hunk.lines),
1661 oldLines = _calcOldNewLineCount.oldLines,
1662 newLines = _calcOldNewLineCount.newLines;
1663 if (oldLines !== undefined) {
1664 hunk.oldLines = oldLines;
1665 } else {
1666 delete hunk.oldLines;
1667 }
1668 if (newLines !== undefined) {
1669 hunk.newLines = newLines;
1670 } else {
1671 delete hunk.newLines;
1672 }
1673 }
1674 function merge(mine, theirs, base) {
1675 mine = loadPatch(mine, base);
1676 theirs = loadPatch(theirs, base);
1677 var ret = {};
1678
1679 // For index we just let it pass through as it doesn't have any necessary meaning.
1680 // Leaving sanity checks on this to the API consumer that may know more about the
1681 // meaning in their own context.
1682 if (mine.index || theirs.index) {
1683 ret.index = mine.index || theirs.index;
1684 }
1685 if (mine.newFileName || theirs.newFileName) {
1686 if (!fileNameChanged(mine)) {
1687 // No header or no change in ours, use theirs (and ours if theirs does not exist)
1688 ret.oldFileName = theirs.oldFileName || mine.oldFileName;
1689 ret.newFileName = theirs.newFileName || mine.newFileName;
1690 ret.oldHeader = theirs.oldHeader || mine.oldHeader;
1691 ret.newHeader = theirs.newHeader || mine.newHeader;
1692 } else if (!fileNameChanged(theirs)) {
1693 // No header or no change in theirs, use ours
1694 ret.oldFileName = mine.oldFileName;
1695 ret.newFileName = mine.newFileName;
1696 ret.oldHeader = mine.oldHeader;
1697 ret.newHeader = mine.newHeader;
1698 } else {
1699 // Both changed... figure it out
1700 ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);
1701 ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);
1702 ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);
1703 ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);
1704 }
1705 }
1706 ret.hunks = [];
1707 var mineIndex = 0,
1708 theirsIndex = 0,
1709 mineOffset = 0,
1710 theirsOffset = 0;
1711 while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
1712 var mineCurrent = mine.hunks[mineIndex] || {
1713 oldStart: Infinity
1714 },
1715 theirsCurrent = theirs.hunks[theirsIndex] || {
1716 oldStart: Infinity
1717 };
1718 if (hunkBefore(mineCurrent, theirsCurrent)) {
1719 // This patch does not overlap with any of the others, yay.
1720 ret.hunks.push(cloneHunk(mineCurrent, mineOffset));
1721 mineIndex++;
1722 theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;
1723 } else if (hunkBefore(theirsCurrent, mineCurrent)) {
1724 // This patch does not overlap with any of the others, yay.
1725 ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));
1726 theirsIndex++;
1727 mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;
1728 } else {
1729 // Overlap, merge as best we can
1730 var mergedHunk = {
1731 oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),
1732 oldLines: 0,
1733 newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),
1734 newLines: 0,
1735 lines: []
1736 };
1737 mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);
1738 theirsIndex++;
1739 mineIndex++;
1740 ret.hunks.push(mergedHunk);
1741 }
1742 }
1743 return ret;
1744 }
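// Usage sketch for merge (not part of the bundle): mine/theirs may each be a patch string,
// a structured patch object, or a plain string that loadPatch diffs against base. Conflicts
// are flagged via ret.conflict and hunk.conflict, with conflicted hunk.lines entries of the
// form { conflict: true, mine: [...], theirs: [...] } (see conflict() below). The variable
// names here are illustrative.
//
//   var merged = merge(minePatchText, theirsPatchText, baseText);
//   merged.hunks.forEach(function (hunk) {
//     if (hunk.conflict) {
//       // resolve the entries in hunk.lines that carry { conflict: true, mine, theirs }
//     }
//   });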
1745 function loadPatch(param, base) {
1746 if (typeof param === 'string') {
1747 if (/^@@/m.test(param) || /^Index:/m.test(param)) {
1748 return parsePatch(param)[0];
1749 }
1750 if (!base) {
1751 throw new Error('Must provide a base reference or pass in a patch');
1752 }
1753 return structuredPatch(undefined, undefined, base, param);
1754 }
1755 return param;
1756 }
1757 function fileNameChanged(patch) {
1758 return patch.newFileName && patch.newFileName !== patch.oldFileName;
1759 }
1760 function selectField(index, mine, theirs) {
1761 if (mine === theirs) {
1762 return mine;
1763 } else {
1764 index.conflict = true;
1765 return {
1766 mine: mine,
1767 theirs: theirs
1768 };
1769 }
1770 }
1771 function hunkBefore(test, check) {
1772 return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
1773 }
1774 function cloneHunk(hunk, offset) {
1775 return {
1776 oldStart: hunk.oldStart,
1777 oldLines: hunk.oldLines,
1778 newStart: hunk.newStart + offset,
1779 newLines: hunk.newLines,
1780 lines: hunk.lines
1781 };
1782 }
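// mergeLines walks the two hunks in lockstep: insertLeading emits whichever side starts
// earlier, the loop below picks additions, removals and context from each side (emitting
// conflict entries where they disagree), insertTrailing flushes whatever remains, and
// calcLineCount recomputes the merged hunk's oldLines/newLines.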
1783 function mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
1784 // This will generally result in a conflicted hunk, but when the overlap between the two
1785 // sides is limited to context lines we can successfully merge the content here.
1786 var mine = {
1787 offset: mineOffset,
1788 lines: mineLines,
1789 index: 0
1790 },
1791 their = {
1792 offset: theirOffset,
1793 lines: theirLines,
1794 index: 0
1795 };
1796
1797 // Handle any leading content
1798 insertLeading(hunk, mine, their);
1799 insertLeading(hunk, their, mine);
1800
1801 // Now in the overlap content. Scan through and select the best changes from each.
1802 while (mine.index < mine.lines.length && their.index < their.lines.length) {
1803 var mineCurrent = mine.lines[mine.index],
1804 theirCurrent = their.lines[their.index];
1805 if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
1806 // Both modified ...
1807 mutualChange(hunk, mine, their);
1808 } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
1809 var _hunk$lines;
1810 // Mine inserted
1811 (_hunk$lines = hunk.lines).push.apply(_hunk$lines, _toConsumableArray(collectChange(mine)));
1812 } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
1813 var _hunk$lines2;
1814 // Theirs inserted
1815 (_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, _toConsumableArray(collectChange(their)));
1816 } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
1817 // Mine removed or edited
1818 removal(hunk, mine, their);
1819 } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {
1820 // Theirs removed or edited
1821 removal(hunk, their, mine, true);
1822 } else if (mineCurrent === theirCurrent) {
1823 // Context identity
1824 hunk.lines.push(mineCurrent);
1825 mine.index++;
1826 their.index++;
1827 } else {
1828 // Context mismatch
1829 conflict(hunk, collectChange(mine), collectChange(their));
1830 }
1831 }
1832
1833 // Now push anything that may be remaining
1834 insertTrailing(hunk, mine);
1835 insertTrailing(hunk, their);
1836 calcLineCount(hunk);
1837 }
1838 function mutualChange(hunk, mine, their) {
1839 var myChanges = collectChange(mine),
1840 theirChanges = collectChange(their);
1841 if (allRemoves(myChanges) && allRemoves(theirChanges)) {
1842 // Special case for remove changes that are supersets of one another
1843 if (arrayStartsWith(myChanges, theirChanges) && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
1844 var _hunk$lines3;
1845 (_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, _toConsumableArray(myChanges));
1846 return;
1847 } else if (arrayStartsWith(theirChanges, myChanges) && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
1848 var _hunk$lines4;
1849 (_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, _toConsumableArray(theirChanges));
1850 return;
1851 }
1852 } else if (arrayEqual(myChanges, theirChanges)) {
1853 var _hunk$lines5;
1854 (_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, _toConsumableArray(myChanges));
1855 return;
1856 }
1857 conflict(hunk, myChanges, theirChanges);
1858 }
1859 function removal(hunk, mine, their, swap) {
1860 var myChanges = collectChange(mine),
1861 theirChanges = collectContext(their, myChanges);
1862 if (theirChanges.merged) {
1863 var _hunk$lines6;
1864 (_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, _toConsumableArray(theirChanges.merged));
1865 } else {
1866 conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
1867 }
1868 }
1869 function conflict(hunk, mine, their) {
1870 hunk.conflict = true;
1871 hunk.lines.push({
1872 conflict: true,
1873 mine: mine,
1874 theirs: their
1875 });
1876 }
1877 function insertLeading(hunk, insert, their) {
1878 while (insert.offset < their.offset && insert.index < insert.lines.length) {
1879 var line = insert.lines[insert.index++];
1880 hunk.lines.push(line);
1881 insert.offset++;
1882 }
1883 }
1884 function insertTrailing(hunk, insert) {
1885 while (insert.index < insert.lines.length) {
1886 var line = insert.lines[insert.index++];
1887 hunk.lines.push(line);
1888 }
1889 }
1890 function collectChange(state) {
1891 var ret = [],
1892 operation = state.lines[state.index][0];
1893 while (state.index < state.lines.length) {
1894 var line = state.lines[state.index];
1895
1896 // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
1897 if (operation === '-' && line[0] === '+') {
1898 operation = '+';
1899 }
1900 if (operation === line[0]) {
1901 ret.push(line);
1902 state.index++;
1903 } else {
1904 break;
1905 }
1906 }
1907 return ret;
1908 }
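// For example, starting at index 0 in ['-a', '-b', '+c', ' d'], collectChange consumes the
// removals plus the immediately following addition as one atomic modification:
//   collectChange(state) => ['-a', '-b', '+c']   (state.index advances to 3)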
1909 function collectContext(state, matchChanges) {
1910 var changes = [],
1911 merged = [],
1912 matchIndex = 0,
1913 contextChanges = false,
1914 conflicted = false;
1915 while (matchIndex < matchChanges.length && state.index < state.lines.length) {
1916 var change = state.lines[state.index],
1917 match = matchChanges[matchIndex];
1918
1919 // Once we've hit our add, we are done
1920 if (match[0] === '+') {
1921 break;
1922 }
1923 contextChanges = contextChanges || change[0] !== ' ';
1924 merged.push(match);
1925 matchIndex++;
1926
1927 // Consume any additions in the other block as a conflict to attempt
1928 // to pull in the remaining context after this
1929 if (change[0] === '+') {
1930 conflicted = true;
1931 while (change[0] === '+') {
1932 changes.push(change);
1933 change = state.lines[++state.index];
1934 }
1935 }
1936 if (match.substr(1) === change.substr(1)) {
1937 changes.push(change);
1938 state.index++;
1939 } else {
1940 conflicted = true;
1941 }
1942 }
1943 if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
1944 conflicted = true;
1945 }
1946 if (conflicted) {
1947 return changes;
1948 }
1949 while (matchIndex < matchChanges.length) {
1950 merged.push(matchChanges[matchIndex++]);
1951 }
1952 return {
1953 merged: merged,
1954 changes: changes
1955 };
1956 }
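// Note on the return value: a clean merge yields { merged: [...], changes: [...] }, whereas
// an irreconcilable context yields the bare changes array, whose missing `merged` property
// is what removal() above treats as a conflict.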
1957 function allRemoves(changes) {
1958 return changes.reduce(function (prev, change) {
1959 return prev && change[0] === '-';
1960 }, true);
1961 }
1962 function skipRemoveSuperset(state, removeChanges, delta) {
1963 for (var i = 0; i < delta; i++) {
1964 var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
1965 if (state.lines[state.index + i] !== ' ' + changeContent) {
1966 return false;
1967 }
1968 }
1969 state.index += delta;
1970 return true;
1971 }
1972 function calcOldNewLineCount(lines) {
1973 var oldLines = 0;
1974 var newLines = 0;
1975 lines.forEach(function (line) {
1976 if (typeof line !== 'string') {
1977 var myCount = calcOldNewLineCount(line.mine);
1978 var theirCount = calcOldNewLineCount(line.theirs);
1979 if (oldLines !== undefined) {
1980 if (myCount.oldLines === theirCount.oldLines) {
1981 oldLines += myCount.oldLines;
1982 } else {
1983 oldLines = undefined;
1984 }
1985 }
1986 if (newLines !== undefined) {
1987 if (myCount.newLines === theirCount.newLines) {
1988 newLines += myCount.newLines;
1989 } else {
1990 newLines = undefined;
1991 }
1992 }
1993 } else {
1994 if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
1995 newLines++;
1996 }
1997 if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
1998 oldLines++;
1999 }
2000 }
2001 });
2002 return {
2003 oldLines: oldLines,
2004 newLines: newLines
2005 };
2006 }
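// For example, with plain string lines only:
//   calcOldNewLineCount(['+a', ' b', '-c']) => { oldLines: 2, newLines: 2 }
// Conflicted (non-string) entries whose two sides disagree produce undefined counts, which
// calcLineCount above then deletes from the hunk.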
2007
2008 function reversePatch(structuredPatch) {
2009 if (Array.isArray(structuredPatch)) {
2010 return structuredPatch.map(reversePatch).reverse();
2011 }
2012 return _objectSpread2(_objectSpread2({}, structuredPatch), {}, {
2013 oldFileName: structuredPatch.newFileName,
2014 oldHeader: structuredPatch.newHeader,
2015 newFileName: structuredPatch.oldFileName,
2016 newHeader: structuredPatch.oldHeader,
2017 hunks: structuredPatch.hunks.map(function (hunk) {
2018 return {
2019 oldLines: hunk.newLines,
2020 oldStart: hunk.newStart,
2021 newLines: hunk.oldLines,
2022 newStart: hunk.oldStart,
2023 lines: hunk.lines.map(function (l) {
2024 if (l.startsWith('-')) {
2025 return "+".concat(l.slice(1));
2026 }
2027 if (l.startsWith('+')) {
2028 return "-".concat(l.slice(1));
2029 }
2030 return l;
2031 })
2032 };
2033 })
2034 });
2035 }
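// Usage sketch (not part of the bundle): reversePatch swaps old/new file names, headers and
// hunk ranges and flips '+'/'-' lines, so applying the result undoes the original patch.
// Variable names are illustrative.
//
//   var forward = structuredPatch('a.txt', 'a.txt', 'old\n', 'new\n');
//   var backward = reversePatch(forward);
//   // formatPatch(backward) would contain '+old' and '-new' instead of '-old' and '+new'.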
2036
2037 // See: http://code.google.com/p/google-diff-match-patch/wiki/API
2038 function convertChangesToDMP(changes) {
2039 var ret = [],
2040 change,
2041 operation;
2042 for (var i = 0; i < changes.length; i++) {
2043 change = changes[i];
2044 if (change.added) {
2045 operation = 1;
2046 } else if (change.removed) {
2047 operation = -1;
2048 } else {
2049 operation = 0;
2050 }
2051 ret.push([operation, change.value]);
2052 }
2053 return ret;
2054 }
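// For example, a change set like [{ value: 'ca' }, { value: 't', removed: true },
// { value: 'p', added: true }] maps to [[0, 'ca'], [-1, 't'], [1, 'p']], following the
// diff-match-patch tuple convention linked above.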
2055
2056 function convertChangesToXML(changes) {
2057 var ret = [];
2058 for (var i = 0; i < changes.length; i++) {
2059 var change = changes[i];
2060 if (change.added) {
2061 ret.push('<ins>');
2062 } else if (change.removed) {
2063 ret.push('<del>');
2064 }
2065 ret.push(escapeHTML(change.value));
2066 if (change.added) {
2067 ret.push('</ins>');
2068 } else if (change.removed) {
2069 ret.push('</del>');
2070 }
2071 }
2072 return ret.join('');
2073 }
2074 function escapeHTML(s) {
2075 var n = s;
2076 n = n.replace(/&/g, '&amp;');
2077 n = n.replace(/</g, '&lt;');
2078 n = n.replace(/>/g, '&gt;');
2079 n = n.replace(/"/g, '&quot;');
2080 return n;
2081 }
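// For example, the change set [{ value: 'ca' }, { value: 't', removed: true },
// { value: 'p', added: true }] renders as 'ca<del>t</del><ins>p</ins>', with '&', '<', '>'
// and '"' in the values escaped by escapeHTML.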
2082
2083 exports.Diff = Diff;
2084 exports.applyPatch = applyPatch;
2085 exports.applyPatches = applyPatches;
2086 exports.canonicalize = canonicalize;
2087 exports.convertChangesToDMP = convertChangesToDMP;
2088 exports.convertChangesToXML = convertChangesToXML;
2089 exports.createPatch = createPatch;
2090 exports.createTwoFilesPatch = createTwoFilesPatch;
2091 exports.diffArrays = diffArrays;
2092 exports.diffChars = diffChars;
2093 exports.diffCss = diffCss;
2094 exports.diffJson = diffJson;
2095 exports.diffLines = diffLines;
2096 exports.diffSentences = diffSentences;
2097 exports.diffTrimmedLines = diffTrimmedLines;
2098 exports.diffWords = diffWords;
2099 exports.diffWordsWithSpace = diffWordsWithSpace;
2100 exports.formatPatch = formatPatch;
2101 exports.merge = merge;
2102 exports.parsePatch = parsePatch;
2103 exports.reversePatch = reversePatch;
2104 exports.structuredPatch = structuredPatch;
2105
2106}));