1function Diff() {}
2Diff.prototype = {
3 diff: function diff(oldString, newString) {
4 var _options$timeout;
5 var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
6 var callback = options.callback;
7 if (typeof options === 'function') {
8 callback = options;
9 options = {};
10 }
11 var self = this;
12 function done(value) {
13 value = self.postProcess(value, options);
14 if (callback) {
15 setTimeout(function () {
16 callback(value);
17 }, 0);
18 return true;
19 } else {
20 return value;
21 }
22 }
23
24 // Allow subclasses to massage the input prior to running
25 oldString = this.castInput(oldString, options);
26 newString = this.castInput(newString, options);
27 oldString = this.removeEmpty(this.tokenize(oldString, options));
28 newString = this.removeEmpty(this.tokenize(newString, options));
29 var newLen = newString.length,
30 oldLen = oldString.length;
31 var editLength = 1;
32 var maxEditLength = newLen + oldLen;
33 if (options.maxEditLength != null) {
34 maxEditLength = Math.min(maxEditLength, options.maxEditLength);
35 }
36 var maxExecutionTime = (_options$timeout = options.timeout) !== null && _options$timeout !== void 0 ? _options$timeout : Infinity;
37 var abortAfterTimestamp = Date.now() + maxExecutionTime;
38 var bestPath = [{
39 oldPos: -1,
40 lastComponent: undefined
41 }];
42
43 // Seed editLength = 0, i.e. the content starts with the same values
44 var newPos = this.extractCommon(bestPath[0], newString, oldString, 0, options);
45 if (bestPath[0].oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
46 // Identity per the equality and tokenizer
47 return done(buildValues(self, bestPath[0].lastComponent, newString, oldString, self.useLongestToken));
48 }
49
50 // Once we hit the right edge of the edit graph on some diagonal k, we can
51 // definitely reach the end of the edit graph in no more than k edits, so
52 // there's no point in considering any moves to diagonal k+1 any more (from
53 // which we're guaranteed to need at least k+1 more edits).
54 // Similarly, once we've reached the bottom of the edit graph, there's no
55 // point considering moves to lower diagonals.
56 // We record this fact by setting minDiagonalToConsider and
57 // maxDiagonalToConsider to some finite value once we've hit the edge of
58 // the edit graph.
59 // This optimization is not faithful to the original algorithm presented in
60 // Myers's paper, which instead pointlessly extends D-paths off the end of
61 // the edit graph - see page 7 of Myers's paper which notes this point
62 // explicitly and illustrates it with a diagram. This has major performance
63 // implications for some common scenarios. For instance, to compute a diff
64 // where the new text simply appends d characters on the end of the
65 // original text of length n, the true Myers algorithm will take O(n+d^2)
66 // time while this optimization needs only O(n+d) time.
67 var minDiagonalToConsider = -Infinity,
68 maxDiagonalToConsider = Infinity;
69
70 // Main worker method. Checks all permutations of a given edit length for acceptance.
71 function execEditLength() {
72 for (var diagonalPath = Math.max(minDiagonalToConsider, -editLength); diagonalPath <= Math.min(maxDiagonalToConsider, editLength); diagonalPath += 2) {
73 var basePath = void 0;
74 var removePath = bestPath[diagonalPath - 1],
75 addPath = bestPath[diagonalPath + 1];
76 if (removePath) {
77 // No one else is going to attempt to use this value, clear it
78 bestPath[diagonalPath - 1] = undefined;
79 }
80 var canAdd = false;
81 if (addPath) {
82 // what newPos will be after we do an insertion:
83 var addPathNewPos = addPath.oldPos - diagonalPath;
84 canAdd = addPath && 0 <= addPathNewPos && addPathNewPos < newLen;
85 }
86 var canRemove = removePath && removePath.oldPos + 1 < oldLen;
87 if (!canAdd && !canRemove) {
88 // If this path is a terminal then prune
89 bestPath[diagonalPath] = undefined;
90 continue;
91 }
92
93 // Select the diagonal that we want to branch from. We select the prior
94 // path whose position in the old string is the farthest from the origin
95 // and does not pass the bounds of the diff graph
96 if (!canRemove || canAdd && removePath.oldPos < addPath.oldPos) {
97 basePath = self.addToPath(addPath, true, false, 0, options);
98 } else {
99 basePath = self.addToPath(removePath, false, true, 1, options);
100 }
101 newPos = self.extractCommon(basePath, newString, oldString, diagonalPath, options);
102 if (basePath.oldPos + 1 >= oldLen && newPos + 1 >= newLen) {
103 // If we have hit the end of both strings, then we are done
104 return done(buildValues(self, basePath.lastComponent, newString, oldString, self.useLongestToken));
105 } else {
106 bestPath[diagonalPath] = basePath;
107 if (basePath.oldPos + 1 >= oldLen) {
108 maxDiagonalToConsider = Math.min(maxDiagonalToConsider, diagonalPath - 1);
109 }
110 if (newPos + 1 >= newLen) {
111 minDiagonalToConsider = Math.max(minDiagonalToConsider, diagonalPath + 1);
112 }
113 }
114 }
115 editLength++;
116 }
117
118 // Performs the edit-length iteration. It's a bit fugly since it has to support both
119 // sync and async modes, which is never fun. Loops over execEditLength until a value
120 // is produced, or until the edit length exceeds options.maxEditLength (if given),
121 // in which case it returns undefined.
122 if (callback) {
123 (function exec() {
124 setTimeout(function () {
125 if (editLength > maxEditLength || Date.now() > abortAfterTimestamp) {
126 return callback();
127 }
128 if (!execEditLength()) {
129 exec();
130 }
131 }, 0);
132 })();
133 } else {
134 while (editLength <= maxEditLength && Date.now() <= abortAfterTimestamp) {
135 var ret = execEditLength();
136 if (ret) {
137 return ret;
138 }
139 }
140 }
141 },
142 addToPath: function addToPath(path, added, removed, oldPosInc, options) {
143 var last = path.lastComponent;
144 if (last && !options.oneChangePerToken && last.added === added && last.removed === removed) {
145 return {
146 oldPos: path.oldPos + oldPosInc,
147 lastComponent: {
148 count: last.count + 1,
149 added: added,
150 removed: removed,
151 previousComponent: last.previousComponent
152 }
153 };
154 } else {
155 return {
156 oldPos: path.oldPos + oldPosInc,
157 lastComponent: {
158 count: 1,
159 added: added,
160 removed: removed,
161 previousComponent: last
162 }
163 };
164 }
165 },
166 extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath, options) {
167 var newLen = newString.length,
168 oldLen = oldString.length,
169 oldPos = basePath.oldPos,
170 newPos = oldPos - diagonalPath,
171 commonCount = 0;
172 while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(oldString[oldPos + 1], newString[newPos + 1], options)) {
173 newPos++;
174 oldPos++;
175 commonCount++;
176 if (options.oneChangePerToken) {
177 basePath.lastComponent = {
178 count: 1,
179 previousComponent: basePath.lastComponent,
180 added: false,
181 removed: false
182 };
183 }
184 }
185 if (commonCount && !options.oneChangePerToken) {
186 basePath.lastComponent = {
187 count: commonCount,
188 previousComponent: basePath.lastComponent,
189 added: false,
190 removed: false
191 };
192 }
193 basePath.oldPos = oldPos;
194 return newPos;
195 },
196 equals: function equals(left, right, options) {
197 if (options.comparator) {
198 return options.comparator(left, right);
199 } else {
200 return left === right || options.ignoreCase && left.toLowerCase() === right.toLowerCase();
201 }
202 },
203 removeEmpty: function removeEmpty(array) {
204 var ret = [];
205 for (var i = 0; i < array.length; i++) {
206 if (array[i]) {
207 ret.push(array[i]);
208 }
209 }
210 return ret;
211 },
212 castInput: function castInput(value) {
213 return value;
214 },
215 tokenize: function tokenize(value) {
216 return Array.from(value);
217 },
218 join: function join(chars) {
219 return chars.join('');
220 },
221 postProcess: function postProcess(changeObjects) {
222 return changeObjects;
223 }
224};
225function buildValues(diff, lastComponent, newString, oldString, useLongestToken) {
226 // First we convert our linked list of components in reverse order to an
227 // array in the right order:
228 var components = [];
229 var nextComponent;
230 while (lastComponent) {
231 components.push(lastComponent);
232 nextComponent = lastComponent.previousComponent;
233 delete lastComponent.previousComponent;
234 lastComponent = nextComponent;
235 }
236 components.reverse();
237 var componentPos = 0,
238 componentLen = components.length,
239 newPos = 0,
240 oldPos = 0;
241 for (; componentPos < componentLen; componentPos++) {
242 var component = components[componentPos];
243 if (!component.removed) {
244 if (!component.added && useLongestToken) {
245 var value = newString.slice(newPos, newPos + component.count);
246 value = value.map(function (value, i) {
247 var oldValue = oldString[oldPos + i];
248 return oldValue.length > value.length ? oldValue : value;
249 });
250 component.value = diff.join(value);
251 } else {
252 component.value = diff.join(newString.slice(newPos, newPos + component.count));
253 }
254 newPos += component.count;
255
256 // Common case
257 if (!component.added) {
258 oldPos += component.count;
259 }
260 } else {
261 component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
262 oldPos += component.count;
263 }
264 }
265 return components;
266}
267
268var characterDiff = new Diff();
269function diffChars(oldStr, newStr, options) {
270 return characterDiff.diff(oldStr, newStr, options);
271}
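
// Editorial usage sketch (not part of the library): diffChars returns an array
// of change objects with `value`, `count` and optional `added`/`removed` flags.
// Roughly:
//
//   diffChars('abcd', 'abxd');
//   // => [ {value: 'ab'}, {value: 'c', removed: true},
//   //      {value: 'x', added: true}, {value: 'd'} ]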
272
273function longestCommonPrefix(str1, str2) {
274 var i;
275 for (i = 0; i < str1.length && i < str2.length; i++) {
276 if (str1[i] != str2[i]) {
277 return str1.slice(0, i);
278 }
279 }
280 return str1.slice(0, i);
281}
282function longestCommonSuffix(str1, str2) {
283 var i;
284
285 // Unlike longestCommonPrefix, we need a special case to handle all scenarios
286 // where we return the empty string since str1.slice(-0) will return the
287 // entire string.
288 if (!str1 || !str2 || str1[str1.length - 1] != str2[str2.length - 1]) {
289 return '';
290 }
291 for (i = 0; i < str1.length && i < str2.length; i++) {
292 if (str1[str1.length - (i + 1)] != str2[str2.length - (i + 1)]) {
293 return str1.slice(-i);
294 }
295 }
296 return str1.slice(-i);
297}
298function replacePrefix(string, oldPrefix, newPrefix) {
299 if (string.slice(0, oldPrefix.length) != oldPrefix) {
300 throw Error("string ".concat(JSON.stringify(string), " doesn't start with prefix ").concat(JSON.stringify(oldPrefix), "; this is a bug"));
301 }
302 return newPrefix + string.slice(oldPrefix.length);
303}
304function replaceSuffix(string, oldSuffix, newSuffix) {
305 if (!oldSuffix) {
306 return string + newSuffix;
307 }
308 if (string.slice(-oldSuffix.length) != oldSuffix) {
309 throw Error("string ".concat(JSON.stringify(string), " doesn't end with suffix ").concat(JSON.stringify(oldSuffix), "; this is a bug"));
310 }
311 return string.slice(0, -oldSuffix.length) + newSuffix;
312}
313function removePrefix(string, oldPrefix) {
314 return replacePrefix(string, oldPrefix, '');
315}
316function removeSuffix(string, oldSuffix) {
317 return replaceSuffix(string, oldSuffix, '');
318}
319function maximumOverlap(string1, string2) {
320 return string2.slice(0, overlapCount(string1, string2));
321}
322
323// Nicked from https://stackoverflow.com/a/60422853/1709587
324function overlapCount(a, b) {
325 // Deal with cases where the strings differ in length
326 var startA = 0;
327 if (a.length > b.length) {
328 startA = a.length - b.length;
329 }
330 var endB = b.length;
331 if (a.length < b.length) {
332 endB = a.length;
333 }
334 // Create a back-reference for each index
335 // that should be followed in case of a mismatch.
336 // We only need B to make these references:
337 var map = Array(endB);
338 var k = 0; // Index that lags behind j
339 map[0] = 0;
340 for (var j = 1; j < endB; j++) {
341 if (b[j] == b[k]) {
342 map[j] = map[k]; // skip over the same character (optional optimisation)
343 } else {
344 map[j] = k;
345 }
346 while (k > 0 && b[j] != b[k]) {
347 k = map[k];
348 }
349 if (b[j] == b[k]) {
350 k++;
351 }
352 }
353 // Phase 2: use these references while iterating over A
354 k = 0;
355 for (var i = startA; i < a.length; i++) {
356 while (k > 0 && a[i] != b[k]) {
357 k = map[k];
358 }
359 if (a[i] == b[k]) {
360 k++;
361 }
362 }
363 return k;
364}
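
// Editorial note (added for clarity, not in the original source): maximumOverlap
// returns the longest suffix of its first argument that is also a prefix of its
// second, which is what the whitespace-dedup code further down relies on, e.g.
//
//   maximumOverlap('abcd', 'cdef');   // => 'cd'
//   maximumOverlap('foo ', '  bar');  // => ' '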
365
366/**
367 * Returns true if the string consistently uses Windows line endings.
368 */
369function hasOnlyWinLineEndings(string) {
370 return string.includes('\r\n') && !string.startsWith('\n') && !string.match(/[^\r]\n/);
371}
372
373/**
374 * Returns true if the string consistently uses Unix line endings.
375 */
376function hasOnlyUnixLineEndings(string) {
377 return !string.includes('\r\n') && string.includes('\n');
378}
379
380// Based on https://en.wikipedia.org/wiki/Latin_script_in_Unicode
381//
382// Ranges and exceptions:
383// Latin-1 Supplement, 0080–00FF
384// - U+00D7 × Multiplication sign
385// - U+00F7 ÷ Division sign
386// Latin Extended-A, 0100–017F
387// Latin Extended-B, 0180–024F
388// IPA Extensions, 0250–02AF
389// Spacing Modifier Letters, 02B0–02FF
390// - U+02C7 ˇ &#711; Caron
391// - U+02D8 ˘ &#728; Breve
392// - U+02D9 ˙ &#729; Dot Above
393// - U+02DA ˚ &#730; Ring Above
394// - U+02DB ˛ &#731; Ogonek
395// - U+02DC ˜ &#732; Small Tilde
396// - U+02DD ˝ &#733; Double Acute Accent
397// Latin Extended Additional, 1E00–1EFF
398var extendedWordChars = "a-zA-Z0-9_\\u{C0}-\\u{FF}\\u{D8}-\\u{F6}\\u{F8}-\\u{2C6}\\u{2C8}-\\u{2D7}\\u{2DE}-\\u{2FF}\\u{1E00}-\\u{1EFF}";
399
400// Each token is one of the following:
401// - A punctuation mark plus the surrounding whitespace
402// - A word plus the surrounding whitespace
403// - Pure whitespace (but only in the special case where this is the entire text
404// is just whitespace)
405//
406// We have to include surrounding whitespace in the tokens because the two
407// alternative approaches produce horribly broken results:
408// * If we just discard the whitespace, we can't fully reproduce the original
409// text from the sequence of tokens and any attempt to render the diff will
410// get the whitespace wrong.
411// * If we have separate tokens for whitespace, then in a typical text every
412// second token will be a single space character. But this often results in
413// the optimal diff between two texts being a perverse one that preserves
414// the spaces between words but deletes and reinserts actual common words.
415// See https://github.com/kpdecker/jsdiff/issues/160#issuecomment-1866099640
416// for an example.
417//
418// Keeping the surrounding whitespace of course has implications for .equals
419// and .join, not just .tokenize.
420
421// This regex does NOT fully implement the tokenization rules described above.
422// Instead, it gives runs of whitespace their own "token". The tokenize method
423// then handles stitching whitespace tokens onto adjacent word or punctuation
424// tokens.
425var tokenizeIncludingWhitespace = new RegExp("[".concat(extendedWordChars, "]+|\\s+|[^").concat(extendedWordChars, "]"), 'ug');
426var wordDiff = new Diff();
427wordDiff.equals = function (left, right, options) {
428 if (options.ignoreCase) {
429 left = left.toLowerCase();
430 right = right.toLowerCase();
431 }
432 return left.trim() === right.trim();
433};
434wordDiff.tokenize = function (value) {
435 var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
436 var parts;
437 if (options.intlSegmenter) {
438 if (options.intlSegmenter.resolvedOptions().granularity != 'word') {
439 throw new Error('The segmenter passed must have a granularity of "word"');
440 }
441 parts = Array.from(options.intlSegmenter.segment(value), function (segment) {
442 return segment.segment;
443 });
444 } else {
445 parts = value.match(tokenizeIncludingWhitespace) || [];
446 }
447 var tokens = [];
448 var prevPart = null;
449 parts.forEach(function (part) {
450 if (/\s/.test(part)) {
451 if (prevPart == null) {
452 tokens.push(part);
453 } else {
454 tokens.push(tokens.pop() + part);
455 }
456 } else if (/\s/.test(prevPart)) {
457 if (tokens[tokens.length - 1] == prevPart) {
458 tokens.push(tokens.pop() + part);
459 } else {
460 tokens.push(prevPart + part);
461 }
462 } else {
463 tokens.push(part);
464 }
465 prevPart = part;
466 });
467 return tokens;
468};
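
// Editorial note (not part of the library): with the default regex path, the
// tokenizer keeps surrounding whitespace attached to word tokens, e.g. (roughly)
//
//   wordDiff.tokenize('foo bar baz');
//   // => ['foo ', ' bar ', ' baz']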
469wordDiff.join = function (tokens) {
470 // Tokens being joined here will always have appeared consecutively in the
471 // same text, so we can simply strip off the leading whitespace from all the
472 // tokens except the first (and except any whitespace-only tokens - but such
473 // a token will always be the first and only token anyway) and then join them
474 // and the whitespace around words and punctuation will end up correct.
475 return tokens.map(function (token, i) {
476 if (i == 0) {
477 return token;
478 } else {
479 return token.replace(/^\s+/, '');
480 }
481 }).join('');
482};
483wordDiff.postProcess = function (changes, options) {
484 if (!changes || options.oneChangePerToken) {
485 return changes;
486 }
487 var lastKeep = null;
488 // Change objects representing any insertion or deletion since the last
489 // "keep" change object. There can be at most one of each.
490 var insertion = null;
491 var deletion = null;
492 changes.forEach(function (change) {
493 if (change.added) {
494 insertion = change;
495 } else if (change.removed) {
496 deletion = change;
497 } else {
498 if (insertion || deletion) {
499 // May be false at start of text
500 dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, change);
501 }
502 lastKeep = change;
503 insertion = null;
504 deletion = null;
505 }
506 });
507 if (insertion || deletion) {
508 dedupeWhitespaceInChangeObjects(lastKeep, deletion, insertion, null);
509 }
510 return changes;
511};
512function diffWords(oldStr, newStr, options) {
513 // This option has never been documented and never will be (it's clearer to
514 // just call `diffWordsWithSpace` directly if you need that behavior), but
515 // has existed in jsdiff for a long time, so we retain support for it here
516 // for the sake of backwards compatibility.
517 if ((options === null || options === void 0 ? void 0 : options.ignoreWhitespace) != null && !options.ignoreWhitespace) {
518 return diffWordsWithSpace(oldStr, newStr, options);
519 }
520 return wordDiff.diff(oldStr, newStr, options);
521}
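
// Editorial usage sketch (not part of the library): diffWords treats
// whitespace-only differences as equal (see wordDiff.equals above), and the
// postProcess step tidies the whitespace on the reported changes. Roughly:
//
//   diffWords('foo bar baz', 'foo qux baz');
//   // => [ {value: 'foo '}, {value: 'bar', removed: true},
//   //      {value: 'qux', added: true}, {value: ' baz'} ]
//
//   diffWords('beep   boop', 'beep boop');
//   // => a single unchanged change object (the extra spaces are ignored)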
522function dedupeWhitespaceInChangeObjects(startKeep, deletion, insertion, endKeep) {
523 // Before returning, we tidy up the leading and trailing whitespace of the
524 // change objects to eliminate cases where trailing whitespace in one object
525 // is repeated as leading whitespace in the next.
526 // Below are examples of the outcomes we want here to explain the code.
527 // I=insert, K=keep, D=delete
528 // 1. diffing 'foo bar baz' vs 'foo baz'
529 // Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
530 // After cleanup, we want: K:'foo ' D:'bar ' K:'baz'
531 //
532 // 2. Diffing 'foo bar baz' vs 'foo qux baz'
533 // Prior to cleanup, we have K:'foo ' D:' bar ' I:' qux ' K:' baz'
534 // After cleanup, we want K:'foo ' D:'bar' I:'qux' K:' baz'
535 //
536 // 3. Diffing 'foo\nbar baz' vs 'foo baz'
537 // Prior to cleanup, we have K:'foo ' D:'\nbar ' K:' baz'
538 // After cleanup, we want K'foo' D:'\nbar' K:' baz'
539 //
540 // 4. Diffing 'foo baz' vs 'foo\nbar baz'
541 // Prior to cleanup, we have K:'foo\n' I:'\nbar ' K:' baz'
542 // After cleanup, we ideally want K'foo' I:'\nbar' K:' baz'
543 // but don't actually manage this currently (the pre-cleanup change
544 // objects don't contain enough information to make it possible).
545 //
546 // 5. Diffing 'foo bar baz' vs 'foo baz'
547 // Prior to cleanup, we have K:'foo ' D:' bar ' K:' baz'
548 // After cleanup, we want K:'foo ' D:' bar ' K:'baz'
549 //
550 // Our handling is unavoidably imperfect in the case where there's a single
551 // indel between keeps and the whitespace has changed. For instance, consider
552 // diffing 'foo\tbar\nbaz' vs 'foo baz'. Unless we create an extra change
553 // object to represent the insertion of the space character (which isn't even
554 // a token), we have no way to avoid losing information about the texts'
555 // original whitespace in the result we return. Still, we do our best to
556 // output something that will look sensible if we e.g. print it with
557 // insertions in green and deletions in red.
558
559 // Between two "keep" change objects (or before the first or after the last
560 // change object), we can have either:
561 // * A "delete" followed by an "insert"
562 // * Just an "insert"
563 // * Just a "delete"
564 // We handle the three cases separately.
565 if (deletion && insertion) {
566 var oldWsPrefix = deletion.value.match(/^\s*/)[0];
567 var oldWsSuffix = deletion.value.match(/\s*$/)[0];
568 var newWsPrefix = insertion.value.match(/^\s*/)[0];
569 var newWsSuffix = insertion.value.match(/\s*$/)[0];
570 if (startKeep) {
571 var commonWsPrefix = longestCommonPrefix(oldWsPrefix, newWsPrefix);
572 startKeep.value = replaceSuffix(startKeep.value, newWsPrefix, commonWsPrefix);
573 deletion.value = removePrefix(deletion.value, commonWsPrefix);
574 insertion.value = removePrefix(insertion.value, commonWsPrefix);
575 }
576 if (endKeep) {
577 var commonWsSuffix = longestCommonSuffix(oldWsSuffix, newWsSuffix);
578 endKeep.value = replacePrefix(endKeep.value, newWsSuffix, commonWsSuffix);
579 deletion.value = removeSuffix(deletion.value, commonWsSuffix);
580 insertion.value = removeSuffix(insertion.value, commonWsSuffix);
581 }
582 } else if (insertion) {
583 // The whitespaces all reflect what was in the new text rather than
584 // the old, so we essentially have no information about whitespace
585 // insertion or deletion. We just want to dedupe the whitespace.
586 // We do that by having each change object keep its trailing
587 // whitespace and deleting duplicate leading whitespace where
588 // present.
589 if (startKeep) {
590 insertion.value = insertion.value.replace(/^\s*/, '');
591 }
592 if (endKeep) {
593 endKeep.value = endKeep.value.replace(/^\s*/, '');
594 }
595 // otherwise we've got a deletion and no insertion
596 } else if (startKeep && endKeep) {
597 var newWsFull = endKeep.value.match(/^\s*/)[0],
598 delWsStart = deletion.value.match(/^\s*/)[0],
599 delWsEnd = deletion.value.match(/\s*$/)[0];
600
601 // Any whitespace that comes straight after startKeep in both the old and
602 // new texts, assign to startKeep and remove from the deletion.
603 var newWsStart = longestCommonPrefix(newWsFull, delWsStart);
604 deletion.value = removePrefix(deletion.value, newWsStart);
605
606 // Any whitespace that comes straight before endKeep in both the old and
607 // new texts, and hasn't already been assigned to startKeep, assign to
608 // endKeep and remove from the deletion.
609 var newWsEnd = longestCommonSuffix(removePrefix(newWsFull, newWsStart), delWsEnd);
610 deletion.value = removeSuffix(deletion.value, newWsEnd);
611 endKeep.value = replacePrefix(endKeep.value, newWsFull, newWsEnd);
612
613 // If there's any whitespace from the new text that HASN'T already been
614 // assigned, assign it to the start:
615 startKeep.value = replaceSuffix(startKeep.value, newWsFull, newWsFull.slice(0, newWsFull.length - newWsEnd.length));
616 } else if (endKeep) {
617 // We are at the start of the text. Preserve all the whitespace on
618 // endKeep, and just remove whitespace from the end of deletion to the
619 // extent that it overlaps with the start of endKeep.
620 var endKeepWsPrefix = endKeep.value.match(/^\s*/)[0];
621 var deletionWsSuffix = deletion.value.match(/\s*$/)[0];
622 var overlap = maximumOverlap(deletionWsSuffix, endKeepWsPrefix);
623 deletion.value = removeSuffix(deletion.value, overlap);
624 } else if (startKeep) {
625 // We are at the END of the text. Preserve all the whitespace on
626 // startKeep, and just remove whitespace from the start of deletion to
627 // the extent that it overlaps with the end of startKeep.
628 var startKeepWsSuffix = startKeep.value.match(/\s*$/)[0];
629 var deletionWsPrefix = deletion.value.match(/^\s*/)[0];
630 var _overlap = maximumOverlap(startKeepWsSuffix, deletionWsPrefix);
631 deletion.value = removePrefix(deletion.value, _overlap);
632 }
633}
634var wordWithSpaceDiff = new Diff();
635wordWithSpaceDiff.tokenize = function (value) {
636 // Slightly different to the tokenizeIncludingWhitespace regex used above in
637 // that this one treats each individual newline as a distinct token, rather
638 // than merging them into other surrounding whitespace. This was requested
639 // in https://github.com/kpdecker/jsdiff/issues/180 &
640 // https://github.com/kpdecker/jsdiff/issues/211
641 var regex = new RegExp("(\\r?\\n)|[".concat(extendedWordChars, "]+|[^\\S\\n\\r]+|[^").concat(extendedWordChars, "]"), 'ug');
642 return value.match(regex) || [];
643};
644function diffWordsWithSpace(oldStr, newStr, options) {
645 return wordWithSpaceDiff.diff(oldStr, newStr, options);
646}
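
// Editorial note (not part of the library): unlike diffWords above,
// diffWordsWithSpace keeps whitespace significant, so e.g.
//
//   diffWordsWithSpace('beep  boop', 'beep boop');
//   // => roughly: 'beep' kept, '  ' removed, ' ' added, 'boop' kept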
647
648function generateOptions(options, defaults) {
649 if (typeof options === 'function') {
650 defaults.callback = options;
651 } else if (options) {
652 for (var name in options) {
653 /* istanbul ignore else */
654 if (options.hasOwnProperty(name)) {
655 defaults[name] = options[name];
656 }
657 }
658 }
659 return defaults;
660}
661
662var lineDiff = new Diff();
663lineDiff.tokenize = function (value, options) {
664 if (options.stripTrailingCr) {
665 // remove one \r before \n to match GNU diff's --strip-trailing-cr behavior
666 value = value.replace(/\r\n/g, '\n');
667 }
668 var retLines = [],
669 linesAndNewlines = value.split(/(\n|\r\n)/);
670
671 // Ignore the final empty token that occurs if the string ends with a new line
672 if (!linesAndNewlines[linesAndNewlines.length - 1]) {
673 linesAndNewlines.pop();
674 }
675
676 // Merge the content and line separators into single tokens
677 for (var i = 0; i < linesAndNewlines.length; i++) {
678 var line = linesAndNewlines[i];
679 if (i % 2 && !options.newlineIsToken) {
680 retLines[retLines.length - 1] += line;
681 } else {
682 retLines.push(line);
683 }
684 }
685 return retLines;
686};
687lineDiff.equals = function (left, right, options) {
688 // If we're ignoring whitespace, we need to normalise lines by stripping
689 // whitespace before checking equality. (This has an annoying interaction
690 // with newlineIsToken that requires special handling: if newlines get their
691 // own token, then we DON'T want to trim the *newline* tokens down to empty
692 // strings, since this would cause us to treat whitespace-only line content
693 // as equal to a separator between lines, which would be weird and
694 // inconsistent with the documented behavior of the options.)
695 if (options.ignoreWhitespace) {
696 if (!options.newlineIsToken || !left.includes('\n')) {
697 left = left.trim();
698 }
699 if (!options.newlineIsToken || !right.includes('\n')) {
700 right = right.trim();
701 }
702 } else if (options.ignoreNewlineAtEof && !options.newlineIsToken) {
703 if (left.endsWith('\n')) {
704 left = left.slice(0, -1);
705 }
706 if (right.endsWith('\n')) {
707 right = right.slice(0, -1);
708 }
709 }
710 return Diff.prototype.equals.call(this, left, right, options);
711};
712function diffLines(oldStr, newStr, callback) {
713 return lineDiff.diff(oldStr, newStr, callback);
714}
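
// Editorial usage sketch (not part of the library): line tokens keep their
// trailing newline unless options.newlineIsToken is set, so roughly:
//
//   diffLines('one\ntwo\n', 'one\nTWO\n');
//   // => [ {value: 'one\n'}, {value: 'two\n', removed: true},
//   //      {value: 'TWO\n', added: true} ]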
715
716// Kept for backwards compatibility. This is a rather arbitrary wrapper method
717// that just calls `diffLines` with `ignoreWhitespace: true`. It's confusing to
718// have two ways to do exactly the same thing in the API, so we no longer
719// document this one (library users should explicitly use `diffLines` with
720// `ignoreWhitespace: true` instead) but we keep it around to maintain
721// compatibility with code that used old versions.
722function diffTrimmedLines(oldStr, newStr, callback) {
723 var options = generateOptions(callback, {
724 ignoreWhitespace: true
725 });
726 return lineDiff.diff(oldStr, newStr, options);
727}
728
729var sentenceDiff = new Diff();
730sentenceDiff.tokenize = function (value) {
731 return value.split(/(\S.+?[.!?])(?=\s+|$)/);
732};
733function diffSentences(oldStr, newStr, callback) {
734 return sentenceDiff.diff(oldStr, newStr, callback);
735}
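
// Editorial usage sketch (not part of the library):
//
//   diffSentences('One. Two. Three.', 'One. Deux. Three.');
//   // => roughly: 'One. ' kept, 'Two.' removed, 'Deux.' added, ' Three.' kept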
736
737var cssDiff = new Diff();
738cssDiff.tokenize = function (value) {
739 return value.split(/([{}:;,]|\s+)/);
740};
741function diffCss(oldStr, newStr, callback) {
742 return cssDiff.diff(oldStr, newStr, callback);
743}
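
// Editorial usage sketch (not part of the library):
//
//   diffCss('.a { color: red; }', '.a { color: blue; }');
//   // => roughly: 'red' removed, 'blue' added, everything else kept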
744
745function ownKeys(e, r) {
746 var t = Object.keys(e);
747 if (Object.getOwnPropertySymbols) {
748 var o = Object.getOwnPropertySymbols(e);
749 r && (o = o.filter(function (r) {
750 return Object.getOwnPropertyDescriptor(e, r).enumerable;
751 })), t.push.apply(t, o);
752 }
753 return t;
754}
755function _objectSpread2(e) {
756 for (var r = 1; r < arguments.length; r++) {
757 var t = null != arguments[r] ? arguments[r] : {};
758 r % 2 ? ownKeys(Object(t), !0).forEach(function (r) {
759 _defineProperty(e, r, t[r]);
760 }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) {
761 Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r));
762 });
763 }
764 return e;
765}
766function _toPrimitive(t, r) {
767 if ("object" != typeof t || !t) return t;
768 var e = t[Symbol.toPrimitive];
769 if (void 0 !== e) {
770 var i = e.call(t, r || "default");
771 if ("object" != typeof i) return i;
772 throw new TypeError("@@toPrimitive must return a primitive value.");
773 }
774 return ("string" === r ? String : Number)(t);
775}
776function _toPropertyKey(t) {
777 var i = _toPrimitive(t, "string");
778 return "symbol" == typeof i ? i : i + "";
779}
780function _typeof(o) {
781 "@babel/helpers - typeof";
782
783 return _typeof = "function" == typeof Symbol && "symbol" == typeof Symbol.iterator ? function (o) {
784 return typeof o;
785 } : function (o) {
786 return o && "function" == typeof Symbol && o.constructor === Symbol && o !== Symbol.prototype ? "symbol" : typeof o;
787 }, _typeof(o);
788}
789function _defineProperty(obj, key, value) {
790 key = _toPropertyKey(key);
791 if (key in obj) {
792 Object.defineProperty(obj, key, {
793 value: value,
794 enumerable: true,
795 configurable: true,
796 writable: true
797 });
798 } else {
799 obj[key] = value;
800 }
801 return obj;
802}
803function _toConsumableArray(arr) {
804 return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
805}
806function _arrayWithoutHoles(arr) {
807 if (Array.isArray(arr)) return _arrayLikeToArray(arr);
808}
809function _iterableToArray(iter) {
810 if (typeof Symbol !== "undefined" && iter[Symbol.iterator] != null || iter["@@iterator"] != null) return Array.from(iter);
811}
812function _unsupportedIterableToArray(o, minLen) {
813 if (!o) return;
814 if (typeof o === "string") return _arrayLikeToArray(o, minLen);
815 var n = Object.prototype.toString.call(o).slice(8, -1);
816 if (n === "Object" && o.constructor) n = o.constructor.name;
817 if (n === "Map" || n === "Set") return Array.from(o);
818 if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
819}
820function _arrayLikeToArray(arr, len) {
821 if (len == null || len > arr.length) len = arr.length;
822 for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
823 return arr2;
824}
825function _nonIterableSpread() {
826 throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
827}
828
829var jsonDiff = new Diff();
830// Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
831// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
832jsonDiff.useLongestToken = true;
833jsonDiff.tokenize = lineDiff.tokenize;
834jsonDiff.castInput = function (value, options) {
835 var undefinedReplacement = options.undefinedReplacement,
836 _options$stringifyRep = options.stringifyReplacer,
837 stringifyReplacer = _options$stringifyRep === void 0 ? function (k, v) {
838 return typeof v === 'undefined' ? undefinedReplacement : v;
839 } : _options$stringifyRep;
840 return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, ' ');
841};
842jsonDiff.equals = function (left, right, options) {
843 return Diff.prototype.equals.call(jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'), options);
844};
845function diffJson(oldObj, newObj, options) {
846 return jsonDiff.diff(oldObj, newObj, options);
847}
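
// Editorial usage sketch (not part of the library): inputs may be objects or
// pre-serialized strings; objects are canonicalized (keys sorted, circular
// references handled) before a line-by-line diff is taken. Roughly:
//
//   diffJson({a: 1, b: 2}, {a: 1, b: 3});
//   // => change objects for the '"b": 2' / '"b": 3' lines, with the
//   //    surrounding lines reported as unchanged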
848
849// This function handles the presence of circular references by bailing out when encountering an
850// object that is already on the "stack" of items being processed. Accepts an optional replacer
851function canonicalize(obj, stack, replacementStack, replacer, key) {
852 stack = stack || [];
853 replacementStack = replacementStack || [];
854 if (replacer) {
855 obj = replacer(key, obj);
856 }
857 var i;
858 for (i = 0; i < stack.length; i += 1) {
859 if (stack[i] === obj) {
860 return replacementStack[i];
861 }
862 }
863 var canonicalizedObj;
864 if ('[object Array]' === Object.prototype.toString.call(obj)) {
865 stack.push(obj);
866 canonicalizedObj = new Array(obj.length);
867 replacementStack.push(canonicalizedObj);
868 for (i = 0; i < obj.length; i += 1) {
869 canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
870 }
871 stack.pop();
872 replacementStack.pop();
873 return canonicalizedObj;
874 }
875 if (obj && obj.toJSON) {
876 obj = obj.toJSON();
877 }
878 if (_typeof(obj) === 'object' && obj !== null) {
879 stack.push(obj);
880 canonicalizedObj = {};
881 replacementStack.push(canonicalizedObj);
882 var sortedKeys = [],
883 _key;
884 for (_key in obj) {
885 /* istanbul ignore else */
886 if (Object.prototype.hasOwnProperty.call(obj, _key)) {
887 sortedKeys.push(_key);
888 }
889 }
890 sortedKeys.sort();
891 for (i = 0; i < sortedKeys.length; i += 1) {
892 _key = sortedKeys[i];
893 canonicalizedObj[_key] = canonicalize(obj[_key], stack, replacementStack, replacer, _key);
894 }
895 stack.pop();
896 replacementStack.pop();
897 } else {
898 canonicalizedObj = obj;
899 }
900 return canonicalizedObj;
901}
902
903var arrayDiff = new Diff();
904arrayDiff.tokenize = function (value) {
905 return value.slice();
906};
907arrayDiff.join = arrayDiff.removeEmpty = function (value) {
908 return value;
909};
910function diffArrays(oldArr, newArr, callback) {
911 return arrayDiff.diff(oldArr, newArr, callback);
912}
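
// Editorial usage sketch (not part of the library): elements are compared with
// === unless options.comparator is supplied. Roughly:
//
//   diffArrays([1, 2, 3], [1, 3, 4]);
//   // => [ {value: [1]}, {value: [2], removed: true},
//   //      {value: [3]}, {value: [4], added: true} ]
//
//   // With a custom comparator (oldItems / newItems are hypothetical):
//   diffArrays(oldItems, newItems, {comparator: function (a, b) { return a.id === b.id; }});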
913
914function unixToWin(patch) {
915 if (Array.isArray(patch)) {
916 return patch.map(unixToWin);
917 }
918 return _objectSpread2(_objectSpread2({}, patch), {}, {
919 hunks: patch.hunks.map(function (hunk) {
920 return _objectSpread2(_objectSpread2({}, hunk), {}, {
921 lines: hunk.lines.map(function (line, i) {
922 var _hunk$lines;
923 return line.startsWith('\\') || line.endsWith('\r') || (_hunk$lines = hunk.lines[i + 1]) !== null && _hunk$lines !== void 0 && _hunk$lines.startsWith('\\') ? line : line + '\r';
924 })
925 });
926 })
927 });
928}
929function winToUnix(patch) {
930 if (Array.isArray(patch)) {
931 return patch.map(winToUnix);
932 }
933 return _objectSpread2(_objectSpread2({}, patch), {}, {
934 hunks: patch.hunks.map(function (hunk) {
935 return _objectSpread2(_objectSpread2({}, hunk), {}, {
936 lines: hunk.lines.map(function (line) {
937 return line.endsWith('\r') ? line.substring(0, line.length - 1) : line;
938 })
939 });
940 })
941 });
942}
943
944/**
945 * Returns true if the patch consistently uses Unix line endings (or only involves one line and has
946 * no line endings).
947 */
948function isUnix(patch) {
949 if (!Array.isArray(patch)) {
950 patch = [patch];
951 }
952 return !patch.some(function (index) {
953 return index.hunks.some(function (hunk) {
954 return hunk.lines.some(function (line) {
955 return !line.startsWith('\\') && line.endsWith('\r');
956 });
957 });
958 });
959}
960
961/**
962 * Returns true if the patch uses Windows line endings and only Windows line endings.
963 */
964function isWin(patch) {
965 if (!Array.isArray(patch)) {
966 patch = [patch];
967 }
968 return patch.some(function (index) {
969 return index.hunks.some(function (hunk) {
970 return hunk.lines.some(function (line) {
971 return line.endsWith('\r');
972 });
973 });
974 }) && patch.every(function (index) {
975 return index.hunks.every(function (hunk) {
976 return hunk.lines.every(function (line, i) {
977 var _hunk$lines2;
978 return line.startsWith('\\') || line.endsWith('\r') || ((_hunk$lines2 = hunk.lines[i + 1]) === null || _hunk$lines2 === void 0 ? void 0 : _hunk$lines2.startsWith('\\'));
979 });
980 });
981 });
982}
983
984function parsePatch(uniDiff) {
985 var diffstr = uniDiff.split(/\n/),
986 list = [],
987 i = 0;
988 function parseIndex() {
989 var index = {};
990 list.push(index);
991
992 // Parse diff metadata
993 while (i < diffstr.length) {
994 var line = diffstr[i];
995
996 // File header found, end parsing diff metadata
997 if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
998 break;
999 }
1000
1001 // Diff index
1002 var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
1003 if (header) {
1004 index.index = header[1];
1005 }
1006 i++;
1007 }
1008
1009 // Parse file headers if they are defined. Unified diff requires them, but
1010 // there's no technical issue with having an isolated hunk without a file header
1011 parseFileHeader(index);
1012 parseFileHeader(index);
1013
1014 // Parse hunks
1015 index.hunks = [];
1016 while (i < diffstr.length) {
1017 var _line = diffstr[i];
1018 if (/^(Index:\s|diff\s|\-\-\-\s|\+\+\+\s|===================================================================)/.test(_line)) {
1019 break;
1020 } else if (/^@@/.test(_line)) {
1021 index.hunks.push(parseHunk());
1022 } else if (_line) {
1023 throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
1024 } else {
1025 i++;
1026 }
1027 }
1028 }
1029
1030 // Parses the --- and +++ headers; if none are found, no lines
1031 // are consumed.
1032 function parseFileHeader(index) {
1033 var fileHeader = /^(---|\+\+\+)\s+(.*)\r?$/.exec(diffstr[i]);
1034 if (fileHeader) {
1035 var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
1036 var data = fileHeader[2].split('\t', 2);
1037 var fileName = data[0].replace(/\\\\/g, '\\');
1038 if (/^".*"$/.test(fileName)) {
1039 fileName = fileName.substr(1, fileName.length - 2);
1040 }
1041 index[keyPrefix + 'FileName'] = fileName;
1042 index[keyPrefix + 'Header'] = (data[1] || '').trim();
1043 i++;
1044 }
1045 }
1046
1047 // Parses a hunk
1048 // This assumes that we are at the start of a hunk.
1049 function parseHunk() {
1050 var chunkHeaderIndex = i,
1051 chunkHeaderLine = diffstr[i++],
1052 chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
1053 var hunk = {
1054 oldStart: +chunkHeader[1],
1055 oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
1056 newStart: +chunkHeader[3],
1057 newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
1058 lines: []
1059 };
1060
1061 // Unified Diff Format quirk: If the chunk size is 0,
1062 // the first number is one lower than one would expect.
1063 // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
1064 if (hunk.oldLines === 0) {
1065 hunk.oldStart += 1;
1066 }
1067 if (hunk.newLines === 0) {
1068 hunk.newStart += 1;
1069 }
1070 var addCount = 0,
1071 removeCount = 0;
1072 for (; i < diffstr.length && (removeCount < hunk.oldLines || addCount < hunk.newLines || (_diffstr$i = diffstr[i]) !== null && _diffstr$i !== void 0 && _diffstr$i.startsWith('\\')); i++) {
1073 var _diffstr$i;
1074 var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
1075 if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
1076 hunk.lines.push(diffstr[i]);
1077 if (operation === '+') {
1078 addCount++;
1079 } else if (operation === '-') {
1080 removeCount++;
1081 } else if (operation === ' ') {
1082 addCount++;
1083 removeCount++;
1084 }
1085 } else {
1086 throw new Error("Hunk at line ".concat(chunkHeaderIndex + 1, " contained invalid line ").concat(diffstr[i]));
1087 }
1088 }
1089
1090 // Handle the empty block count case
1091 if (!addCount && hunk.newLines === 1) {
1092 hunk.newLines = 0;
1093 }
1094 if (!removeCount && hunk.oldLines === 1) {
1095 hunk.oldLines = 0;
1096 }
1097
1098 // Perform sanity checking
1099 if (addCount !== hunk.newLines) {
1100 throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
1101 }
1102 if (removeCount !== hunk.oldLines) {
1103 throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
1104 }
1105 return hunk;
1106 }
1107 while (i < diffstr.length) {
1108 parseIndex();
1109 }
1110 return list;
1111}
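
// Editorial usage sketch (not part of the library): parsePatch turns a unified
// diff string into the structure consumed by applyPatch below, e.g.
//
//   parsePatch('--- a.txt\n+++ a.txt\n@@ -1,2 +1,2 @@\n one\n-two\n+TWO\n');
//   // => [ {oldFileName: 'a.txt', newFileName: 'a.txt', oldHeader: '', newHeader: '',
//   //       hunks: [ {oldStart: 1, oldLines: 2, newStart: 1, newLines: 2,
//   //                 lines: [' one', '-two', '+TWO']} ]} ]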
1112
1113// Iterator that traverses in the range of [min, max], stepping
1114// by distance from a given start position. I.e. for [0, 4], with
1115// start of 2, this will iterate 2, 3, 1, 4, 0.
1116function distanceIterator (start, minLine, maxLine) {
1117 var wantForward = true,
1118 backwardExhausted = false,
1119 forwardExhausted = false,
1120 localOffset = 1;
1121 return function iterator() {
1122 if (wantForward && !forwardExhausted) {
1123 if (backwardExhausted) {
1124 localOffset++;
1125 } else {
1126 wantForward = false;
1127 }
1128
1129 // Check if trying to fit beyond text length, and if not, check it fits
1130 // after offset location (or desired location on first iteration)
1131 if (start + localOffset <= maxLine) {
1132 return start + localOffset;
1133 }
1134 forwardExhausted = true;
1135 }
1136 if (!backwardExhausted) {
1137 if (!forwardExhausted) {
1138 wantForward = true;
1139 }
1140
1141 // Check if trying to fit before text beginning, and if not, check it fits
1142 // before offset location
1143 if (minLine <= start - localOffset) {
1144 return start - localOffset++;
1145 }
1146 backwardExhausted = true;
1147 return iterator();
1148 }
1149
1150 // We tried to fit hunk before text beginning and beyond text length, then
1151 // hunk can't fit on the text. Return undefined
1152 };
1153}
1154
1155function applyPatch(source, uniDiff) {
1156 var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
1157 if (typeof uniDiff === 'string') {
1158 uniDiff = parsePatch(uniDiff);
1159 }
1160 if (Array.isArray(uniDiff)) {
1161 if (uniDiff.length > 1) {
1162 throw new Error('applyPatch only works with a single input.');
1163 }
1164 uniDiff = uniDiff[0];
1165 }
1166 if (options.autoConvertLineEndings || options.autoConvertLineEndings == null) {
1167 if (hasOnlyWinLineEndings(source) && isUnix(uniDiff)) {
1168 uniDiff = unixToWin(uniDiff);
1169 } else if (hasOnlyUnixLineEndings(source) && isWin(uniDiff)) {
1170 uniDiff = winToUnix(uniDiff);
1171 }
1172 }
1173
1174 // Apply the diff to the input
1175 var lines = source.split('\n'),
1176 hunks = uniDiff.hunks,
1177 compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
1178 return line === patchContent;
1179 },
1180 fuzzFactor = options.fuzzFactor || 0,
1181 minLine = 0;
1182 if (fuzzFactor < 0 || !Number.isInteger(fuzzFactor)) {
1183 throw new Error('fuzzFactor must be a non-negative integer');
1184 }
1185
1186 // Special case for empty patch.
1187 if (!hunks.length) {
1188 return source;
1189 }
1190
1191 // Before anything else, handle EOFNL insertion/removal. If the patch tells us to make a change
1192 // to the EOFNL that is redundant/impossible - i.e. to remove a newline that's not there, or add a
1193 // newline that already exists - then we either return false and fail to apply the patch (if
1194 // fuzzFactor is 0) or simply ignore the problem and do nothing (if fuzzFactor is >0).
1195 // If we do need to remove/add a newline at EOF, this will always be in the final hunk:
1196 var prevLine = '',
1197 removeEOFNL = false,
1198 addEOFNL = false;
1199 for (var i = 0; i < hunks[hunks.length - 1].lines.length; i++) {
1200 var line = hunks[hunks.length - 1].lines[i];
1201 if (line[0] == '\\') {
1202 if (prevLine[0] == '+') {
1203 removeEOFNL = true;
1204 } else if (prevLine[0] == '-') {
1205 addEOFNL = true;
1206 }
1207 }
1208 prevLine = line;
1209 }
1210 if (removeEOFNL) {
1211 if (addEOFNL) {
1212 // This means the final line gets changed but doesn't have a trailing newline in either the
1213 // original or patched version. In that case, we do nothing if fuzzFactor > 0, and if
1214 // fuzzFactor is 0, we simply validate that the source file has no trailing newline.
1215 if (!fuzzFactor && lines[lines.length - 1] == '') {
1216 return false;
1217 }
1218 } else if (lines[lines.length - 1] == '') {
1219 lines.pop();
1220 } else if (!fuzzFactor) {
1221 return false;
1222 }
1223 } else if (addEOFNL) {
1224 if (lines[lines.length - 1] != '') {
1225 lines.push('');
1226 } else if (!fuzzFactor) {
1227 return false;
1228 }
1229 }
1230
1231 /**
1232 * Checks if the hunk can be made to fit at the provided location with at most `maxErrors`
1233 * insertions, substitutions, or deletions, while ensuring also that:
1234 * - lines deleted in the hunk match exactly, and
1235 * - wherever an insertion operation or block of insertion operations appears in the hunk, the
1236 * immediately preceding and following lines of context match exactly
1237 *
1238 * `toPos` should be set such that lines[toPos] is meant to match hunkLines[0].
1239 *
1240 * If the hunk can be applied, returns an object with properties `oldLineLastI` and
1241 * `replacementLines`. Otherwise, returns null.
1242 */
1243 function applyHunk(hunkLines, toPos, maxErrors) {
1244 var hunkLinesI = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : 0;
1245 var lastContextLineMatched = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : true;
1246 var patchedLines = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : [];
1247 var patchedLinesLength = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : 0;
1248 var nConsecutiveOldContextLines = 0;
1249 var nextContextLineMustMatch = false;
1250 for (; hunkLinesI < hunkLines.length; hunkLinesI++) {
1251 var hunkLine = hunkLines[hunkLinesI],
1252 operation = hunkLine.length > 0 ? hunkLine[0] : ' ',
1253 content = hunkLine.length > 0 ? hunkLine.substr(1) : hunkLine;
1254 if (operation === '-') {
1255 if (compareLine(toPos + 1, lines[toPos], operation, content)) {
1256 toPos++;
1257 nConsecutiveOldContextLines = 0;
1258 } else {
1259 if (!maxErrors || lines[toPos] == null) {
1260 return null;
1261 }
1262 patchedLines[patchedLinesLength] = lines[toPos];
1263 return applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1);
1264 }
1265 }
1266 if (operation === '+') {
1267 if (!lastContextLineMatched) {
1268 return null;
1269 }
1270 patchedLines[patchedLinesLength] = content;
1271 patchedLinesLength++;
1272 nConsecutiveOldContextLines = 0;
1273 nextContextLineMustMatch = true;
1274 }
1275 if (operation === ' ') {
1276 nConsecutiveOldContextLines++;
1277 patchedLines[patchedLinesLength] = lines[toPos];
1278 if (compareLine(toPos + 1, lines[toPos], operation, content)) {
1279 patchedLinesLength++;
1280 lastContextLineMatched = true;
1281 nextContextLineMustMatch = false;
1282 toPos++;
1283 } else {
1284 if (nextContextLineMustMatch || !maxErrors) {
1285 return null;
1286 }
1287
1288 // Consider 3 possibilities in sequence:
1289 // 1. lines contains a *substitution* not included in the patch context, or
1290 // 2. lines contains an *insertion* not included in the patch context, or
1291 // 3. lines contains a *deletion* not included in the patch context
1292 // The first two options are of course only possible if the line from lines is non-null -
1293 // i.e. only option 3 is possible if we've overrun the end of the old file.
1294 return lines[toPos] && (applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength + 1) || applyHunk(hunkLines, toPos + 1, maxErrors - 1, hunkLinesI, false, patchedLines, patchedLinesLength + 1)) || applyHunk(hunkLines, toPos, maxErrors - 1, hunkLinesI + 1, false, patchedLines, patchedLinesLength);
1295 }
1296 }
1297 }
1298
1299 // Before returning, trim any unmodified context lines off the end of patchedLines and reduce
1300 // toPos (and thus oldLineLastI) accordingly. This allows later hunks to be applied to a region
1301 // that starts in this hunk's trailing context.
1302 patchedLinesLength -= nConsecutiveOldContextLines;
1303 toPos -= nConsecutiveOldContextLines;
1304 patchedLines.length = patchedLinesLength;
1305 return {
1306 patchedLines: patchedLines,
1307 oldLineLastI: toPos - 1
1308 };
1309 }
1310 var resultLines = [];
1311
1312 // Search best fit offsets for each hunk based on the previous ones
1313 var prevHunkOffset = 0;
1314 for (var _i = 0; _i < hunks.length; _i++) {
1315 var hunk = hunks[_i];
1316 var hunkResult = void 0;
1317 var maxLine = lines.length - hunk.oldLines + fuzzFactor;
1318 var toPos = void 0;
1319 for (var maxErrors = 0; maxErrors <= fuzzFactor; maxErrors++) {
1320 toPos = hunk.oldStart + prevHunkOffset - 1;
1321 var iterator = distanceIterator(toPos, minLine, maxLine);
1322 for (; toPos !== undefined; toPos = iterator()) {
1323 hunkResult = applyHunk(hunk.lines, toPos, maxErrors);
1324 if (hunkResult) {
1325 break;
1326 }
1327 }
1328 if (hunkResult) {
1329 break;
1330 }
1331 }
1332 if (!hunkResult) {
1333 return false;
1334 }
1335
1336 // Copy everything from the end of where we applied the last hunk to the start of this hunk
1337 for (var _i2 = minLine; _i2 < toPos; _i2++) {
1338 resultLines.push(lines[_i2]);
1339 }
1340
1341 // Add the lines produced by applying the hunk:
1342 for (var _i3 = 0; _i3 < hunkResult.patchedLines.length; _i3++) {
1343 var _line = hunkResult.patchedLines[_i3];
1344 resultLines.push(_line);
1345 }
1346
1347 // Set lower text limit to end of the current hunk, so next ones don't try
1348 // to fit over already patched text
1349 minLine = hunkResult.oldLineLastI + 1;
1350
1351 // Note the offset between where the patch said the hunk should've applied and where we
1352 // applied it, so we can adjust future hunks accordingly:
1353 prevHunkOffset = toPos + 1 - hunk.oldStart;
1354 }
1355
1356 // Copy over the rest of the lines from the old text
1357 for (var _i4 = minLine; _i4 < lines.length; _i4++) {
1358 resultLines.push(lines[_i4]);
1359 }
1360 return resultLines.join('\n');
1361}
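
// Editorial usage sketch (not part of the library; `source` and `patchString`
// are hypothetical variables): applyPatch accepts a patch string or a single
// parsed patch object and returns the patched text, or false if the hunks
// cannot be placed:
//
//   applyPatch('one\ntwo\n', '--- a.txt\n+++ a.txt\n@@ -1,2 +1,2 @@\n one\n-two\n+TWO\n');
//   // => 'one\nTWO\n'
//   applyPatch(source, patchString, {fuzzFactor: 2}); // tolerate small context mismatches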
1362
1363// Wrapper that supports multiple file patches via callbacks.
1364function applyPatches(uniDiff, options) {
1365 if (typeof uniDiff === 'string') {
1366 uniDiff = parsePatch(uniDiff);
1367 }
1368 var currentIndex = 0;
1369 function processIndex() {
1370 var index = uniDiff[currentIndex++];
1371 if (!index) {
1372 return options.complete();
1373 }
1374 options.loadFile(index, function (err, data) {
1375 if (err) {
1376 return options.complete(err);
1377 }
1378 var updatedContent = applyPatch(data, index, options);
1379 options.patched(index, updatedContent, function (err) {
1380 if (err) {
1381 return options.complete(err);
1382 }
1383 processIndex();
1384 });
1385 });
1386 }
1387 processIndex();
1388}
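
// Editorial usage sketch (not part of the library; readSomehow / writeSomehow /
// multiFilePatch are placeholders for whatever I/O the caller provides):
//
//   applyPatches(multiFilePatch, {
//     loadFile: function (index, done) { done(null, readSomehow(index.oldFileName)); },
//     patched: function (index, content, done) { writeSomehow(index.newFileName, content); done(); },
//     complete: function (err) { if (err) { throw err; } }
//   });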
1389
1390function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
1391 if (!options) {
1392 options = {};
1393 }
1394 if (typeof options === 'function') {
1395 options = {
1396 callback: options
1397 };
1398 }
1399 if (typeof options.context === 'undefined') {
1400 options.context = 4;
1401 }
1402 if (options.newlineIsToken) {
1403 throw new Error('newlineIsToken may not be used with patch-generation functions, only with diffing functions');
1404 }
1405 if (!options.callback) {
1406 return diffLinesResultToPatch(diffLines(oldStr, newStr, options));
1407 } else {
1408 var _options = options,
1409 _callback = _options.callback;
1410 diffLines(oldStr, newStr, _objectSpread2(_objectSpread2({}, options), {}, {
1411 callback: function callback(diff) {
1412 var patch = diffLinesResultToPatch(diff);
1413 _callback(patch);
1414 }
1415 }));
1416 }
1417 function diffLinesResultToPatch(diff) {
1418 // STEP 1: Build up the patch with no "\ No newline at end of file" lines and with the arrays
1419 // of lines containing trailing newline characters. We'll tidy up later...
1420
1421 if (!diff) {
1422 return;
1423 }
1424 diff.push({
1425 value: '',
1426 lines: []
1427 }); // Append an empty value to make cleanup easier
1428
1429 function contextLines(lines) {
1430 return lines.map(function (entry) {
1431 return ' ' + entry;
1432 });
1433 }
1434 var hunks = [];
1435 var oldRangeStart = 0,
1436 newRangeStart = 0,
1437 curRange = [],
1438 oldLine = 1,
1439 newLine = 1;
1440 var _loop = function _loop() {
1441 var current = diff[i],
1442 lines = current.lines || splitLines(current.value);
1443 current.lines = lines;
1444 if (current.added || current.removed) {
1445 var _curRange;
1446 // If we have previous context, start with that
1447 if (!oldRangeStart) {
1448 var prev = diff[i - 1];
1449 oldRangeStart = oldLine;
1450 newRangeStart = newLine;
1451 if (prev) {
1452 curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
1453 oldRangeStart -= curRange.length;
1454 newRangeStart -= curRange.length;
1455 }
1456 }
1457
1458 // Output our changes
1459 (_curRange = curRange).push.apply(_curRange, _toConsumableArray(lines.map(function (entry) {
1460 return (current.added ? '+' : '-') + entry;
1461 })));
1462
1463 // Track the updated file position
1464 if (current.added) {
1465 newLine += lines.length;
1466 } else {
1467 oldLine += lines.length;
1468 }
1469 } else {
1470 // Identical context lines. Track line changes
1471 if (oldRangeStart) {
1472 // Close out any changes that have been output (or join overlapping)
1473 if (lines.length <= options.context * 2 && i < diff.length - 2) {
1474 var _curRange2;
1475 // Overlapping
1476 (_curRange2 = curRange).push.apply(_curRange2, _toConsumableArray(contextLines(lines)));
1477 } else {
1478 var _curRange3;
1479 // end the range and output
1480 var contextSize = Math.min(lines.length, options.context);
1481 (_curRange3 = curRange).push.apply(_curRange3, _toConsumableArray(contextLines(lines.slice(0, contextSize))));
1482 var _hunk = {
1483 oldStart: oldRangeStart,
1484 oldLines: oldLine - oldRangeStart + contextSize,
1485 newStart: newRangeStart,
1486 newLines: newLine - newRangeStart + contextSize,
1487 lines: curRange
1488 };
1489 hunks.push(_hunk);
1490 oldRangeStart = 0;
1491 newRangeStart = 0;
1492 curRange = [];
1493 }
1494 }
1495 oldLine += lines.length;
1496 newLine += lines.length;
1497 }
1498 };
1499 for (var i = 0; i < diff.length; i++) {
1500 _loop();
1501 }
1502
1503 // Step 2: eliminate the trailing `\n` from each line of each hunk, and, where needed, add
1504 // "\ No newline at end of file".
1505 for (var _i = 0, _hunks = hunks; _i < _hunks.length; _i++) {
1506 var hunk = _hunks[_i];
1507 for (var _i2 = 0; _i2 < hunk.lines.length; _i2++) {
1508 if (hunk.lines[_i2].endsWith('\n')) {
1509 hunk.lines[_i2] = hunk.lines[_i2].slice(0, -1);
1510 } else {
1511 hunk.lines.splice(_i2 + 1, 0, '\\ No newline at end of file');
1512 _i2++; // Skip the line we just added, then continue iterating
1513 }
1514 }
1515 }
1516 return {
1517 oldFileName: oldFileName,
1518 newFileName: newFileName,
1519 oldHeader: oldHeader,
1520 newHeader: newHeader,
1521 hunks: hunks
1522 };
1523 }
1524}
1525function formatPatch(diff) {
1526 if (Array.isArray(diff)) {
1527 return diff.map(formatPatch).join('\n');
1528 }
1529 var ret = [];
1530 if (diff.oldFileName == diff.newFileName) {
1531 ret.push('Index: ' + diff.oldFileName);
1532 }
1533 ret.push('===================================================================');
1534 ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
1535 ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
1536 for (var i = 0; i < diff.hunks.length; i++) {
1537 var hunk = diff.hunks[i];
1538 // Unified Diff Format quirk: If the chunk size is 0,
1539 // the first number is one lower than one would expect.
1540 // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
1541 if (hunk.oldLines === 0) {
1542 hunk.oldStart -= 1;
1543 }
1544 if (hunk.newLines === 0) {
1545 hunk.newStart -= 1;
1546 }
1547 ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
1548 ret.push.apply(ret, hunk.lines);
1549 }
1550 return ret.join('\n') + '\n';
1551}
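// Sketch of formatPatch applied to a structured patch object (illustrative, not library
// source; the hunk header numbers are assumed):
//
//   console.log(formatPatch(structuredPatch('a.txt', 'a.txt', oldStr, newStr)));
//   // Index: a.txt
//   // ===================================================================
//   // --- a.txt
//   // +++ a.txt
//   // @@ -1,2 +1,2 @@
//   //  unchanged context
//   // -removed line
//   // +added line
//
// Note that, as written above, formatPatch adjusts hunk.oldStart/newStart in place when
// the corresponding line count is 0 (the zero-size quirk of the unified diff format).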
1552function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
1553 var _options2;
1554 if (typeof options === 'function') {
1555 options = {
1556 callback: options
1557 };
1558 }
1559 if (!((_options2 = options) !== null && _options2 !== void 0 && _options2.callback)) {
1560 var patchObj = structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options);
1561 if (!patchObj) {
1562 return;
1563 }
1564 return formatPatch(patchObj);
1565 } else {
1566 var _options3 = options,
1567 _callback2 = _options3.callback;
1568 structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, _objectSpread2(_objectSpread2({}, options), {}, {
1569 callback: function callback(patchObj) {
1570 if (!patchObj) {
1571 _callback2();
1572 } else {
1573 _callback2(formatPatch(patchObj));
1574 }
1575 }
1576 }));
1577 }
1578}
1579function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
1580 return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
1581}
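// Usage sketch for the patch creation helpers (illustrative; oldText/newText are assumed
// variables, not part of the library source):
//
//   // Synchronous: returns the formatted unified diff as a string.
//   var patchText = createPatch('notes.txt', oldText, newText);
//
//   // Different old/new file names plus optional headers and options:
//   var twoFile = createTwoFilesPatch('old/notes.txt', 'new/notes.txt', oldText, newText,
//                                     'old header', 'new header', {context: 1});
//
//   // Asynchronous: pass a callback (directly, or as {callback: ...}); it receives the
//   // formatted patch, or undefined if no patch could be produced.
//   createPatch('notes.txt', oldText, newText, undefined, undefined, function (result) {
//     /* ... */
//   });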
1582
1583/**
1584 * Split `text` into an array of lines, including the trailing newline character (where present)
1585 */
1586function splitLines(text) {
1587 var hasTrailingNl = text.endsWith('\n');
1588 var result = text.split('\n').map(function (line) {
1589 return line + '\n';
1590 });
1591 if (hasTrailingNl) {
1592 result.pop();
1593 } else {
1594 result.push(result.pop().slice(0, -1));
1595 }
1596 return result;
1597}
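// For example (per the implementation above):
//   splitLines('a\nb\n')  // => ['a\n', 'b\n']
//   splitLines('a\nb')    // => ['a\n', 'b']
//   splitLines('')        // => ['']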
1598
1599function arrayEqual(a, b) {
1600 if (a.length !== b.length) {
1601 return false;
1602 }
1603 return arrayStartsWith(a, b);
1604}
1605function arrayStartsWith(array, start) {
1606 if (start.length > array.length) {
1607 return false;
1608 }
1609 for (var i = 0; i < start.length; i++) {
1610 if (start[i] !== array[i]) {
1611 return false;
1612 }
1613 }
1614 return true;
1615}
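// e.g. arrayStartsWith(['-a', '-b', '-c'], ['-a', '-b']) === true; arrayEqual only
// succeeds when the prefix covers the whole array.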
1616
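// Recomputes hunk.oldLines/newLines from hunk.lines. When the hunk contains conflict
// entries whose two sides disagree on a count, that count is deleted rather than guessed
// (see calcOldNewLineCount below).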
1617function calcLineCount(hunk) {
1618 var _calcOldNewLineCount = calcOldNewLineCount(hunk.lines),
1619 oldLines = _calcOldNewLineCount.oldLines,
1620 newLines = _calcOldNewLineCount.newLines;
1621 if (oldLines !== undefined) {
1622 hunk.oldLines = oldLines;
1623 } else {
1624 delete hunk.oldLines;
1625 }
1626 if (newLines !== undefined) {
1627 hunk.newLines = newLines;
1628 } else {
1629 delete hunk.newLines;
1630 }
1631}
1632function merge(mine, theirs, base) {
1633 mine = loadPatch(mine, base);
1634 theirs = loadPatch(theirs, base);
1635 var ret = {};
1636
1637 // For the index we just let it pass through, as it doesn't have any necessary meaning.
1638 // Sanity checks on this are left to the API consumer, who may know more about its
1639 // meaning in their own context.
1640 if (mine.index || theirs.index) {
1641 ret.index = mine.index || theirs.index;
1642 }
1643 if (mine.newFileName || theirs.newFileName) {
1644 if (!fileNameChanged(mine)) {
1645 // No header or no change in ours, use theirs (and ours if theirs does not exist)
1646 ret.oldFileName = theirs.oldFileName || mine.oldFileName;
1647 ret.newFileName = theirs.newFileName || mine.newFileName;
1648 ret.oldHeader = theirs.oldHeader || mine.oldHeader;
1649 ret.newHeader = theirs.newHeader || mine.newHeader;
1650 } else if (!fileNameChanged(theirs)) {
1651 // No header or no change in theirs, use ours
1652 ret.oldFileName = mine.oldFileName;
1653 ret.newFileName = mine.newFileName;
1654 ret.oldHeader = mine.oldHeader;
1655 ret.newHeader = mine.newHeader;
1656 } else {
1657 // Both changed... figure it out
1658 ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);
1659 ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);
1660 ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);
1661 ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);
1662 }
1663 }
1664 ret.hunks = [];
1665 var mineIndex = 0,
1666 theirsIndex = 0,
1667 mineOffset = 0,
1668 theirsOffset = 0;
1669 while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
1670 var mineCurrent = mine.hunks[mineIndex] || {
1671 oldStart: Infinity
1672 },
1673 theirsCurrent = theirs.hunks[theirsIndex] || {
1674 oldStart: Infinity
1675 };
1676 if (hunkBefore(mineCurrent, theirsCurrent)) {
1677 // This patch does not overlap with any of the others, yay.
1678 ret.hunks.push(cloneHunk(mineCurrent, mineOffset));
1679 mineIndex++;
1680 theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;
1681 } else if (hunkBefore(theirsCurrent, mineCurrent)) {
1682 // This patch does not overlap with any of the others, yay.
1683 ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));
1684 theirsIndex++;
1685 mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;
1686 } else {
1687 // Overlap, merge as best we can
1688 var mergedHunk = {
1689 oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),
1690 oldLines: 0,
1691 newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),
1692 newLines: 0,
1693 lines: []
1694 };
1695 mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);
1696 theirsIndex++;
1697 mineIndex++;
1698 ret.hunks.push(mergedHunk);
1699 }
1700 }
1701 return ret;
1702}
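// Rough three-way merge sketch (illustrative; the variable names are assumptions):
//
//   // `mine` and `theirs` may be patch strings, structured patch objects, or plain file
//   // content (in which case `base` is required so the content can be diffed against it).
//   var merged = merge(minePatchString, theirsPatchString, baseText);
//
//   // Non-overlapping hunks are interleaved as-is; overlapping hunks are combined.
//   // Conflicting file names/headers set merged.conflict and store {mine, theirs} objects;
//   // conflicting content sets hunk.conflict and pushes {conflict: true, mine, theirs}
//   // entries into hunk.lines.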
1703function loadPatch(param, base) {
1704 if (typeof param === 'string') {
1705 if (/^@@/m.test(param) || /^Index:/m.test(param)) {
1706 return parsePatch(param)[0];
1707 }
1708 if (!base) {
1709 throw new Error('Must provide a base reference or pass in a patch');
1710 }
1711 return structuredPatch(undefined, undefined, base, param);
1712 }
1713 return param;
1714}
1715function fileNameChanged(patch) {
1716 return patch.newFileName && patch.newFileName !== patch.oldFileName;
1717}
1718function selectField(index, mine, theirs) {
1719 if (mine === theirs) {
1720 return mine;
1721 } else {
1722 index.conflict = true;
1723 return {
1724 mine: mine,
1725 theirs: theirs
1726 };
1727 }
1728}
1729function hunkBefore(test, check) {
1730 return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
1731}
1732function cloneHunk(hunk, offset) {
1733 return {
1734 oldStart: hunk.oldStart,
1735 oldLines: hunk.oldLines,
1736 newStart: hunk.newStart + offset,
1737 newLines: hunk.newLines,
1738 lines: hunk.lines
1739 };
1740}
1741function mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
1742 // This will generally result in a conflicted hunk, but there are cases where the only
1743 // overlap is context lines, in which case we can successfully merge the content here.
1744 var mine = {
1745 offset: mineOffset,
1746 lines: mineLines,
1747 index: 0
1748 },
1749 their = {
1750 offset: theirOffset,
1751 lines: theirLines,
1752 index: 0
1753 };
1754
1755 // Handle any leading content
1756 insertLeading(hunk, mine, their);
1757 insertLeading(hunk, their, mine);
1758
1759 // Now in the overlap content. Scan through and select the best changes from each.
1760 while (mine.index < mine.lines.length && their.index < their.lines.length) {
1761 var mineCurrent = mine.lines[mine.index],
1762 theirCurrent = their.lines[their.index];
1763 if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
1764 // Both modified ...
1765 mutualChange(hunk, mine, their);
1766 } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
1767 var _hunk$lines;
1768 // Mine inserted
1769 (_hunk$lines = hunk.lines).push.apply(_hunk$lines, _toConsumableArray(collectChange(mine)));
1770 } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
1771 var _hunk$lines2;
1772 // Theirs inserted
1773 (_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, _toConsumableArray(collectChange(their)));
1774 } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
1775 // Mine removed or edited
1776 removal(hunk, mine, their);
1777 } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {
1778 // Theirs removed or edited
1779 removal(hunk, their, mine, true);
1780 } else if (mineCurrent === theirCurrent) {
1781 // Context identity
1782 hunk.lines.push(mineCurrent);
1783 mine.index++;
1784 their.index++;
1785 } else {
1786 // Context mismatch
1787 conflict(hunk, collectChange(mine), collectChange(their));
1788 }
1789 }
1790
1791 // Now push anything that may be remaining
1792 insertTrailing(hunk, mine);
1793 insertTrailing(hunk, their);
1794 calcLineCount(hunk);
1795}
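// Summary of the scan above: when both sides touch the same lines the change is resolved
// by mutualChange(); an insertion on one side against context on the other is taken as-is;
// a removal against context goes through removal(), which tries to fold the context back in;
// identical context lines pass straight through; anything else becomes a conflict entry.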
1796function mutualChange(hunk, mine, their) {
1797 var myChanges = collectChange(mine),
1798 theirChanges = collectChange(their);
1799 if (allRemoves(myChanges) && allRemoves(theirChanges)) {
1800 // Special case for remove changes that are supersets of one another
1801 if (arrayStartsWith(myChanges, theirChanges) && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
1802 var _hunk$lines3;
1803 (_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, _toConsumableArray(myChanges));
1804 return;
1805 } else if (arrayStartsWith(theirChanges, myChanges) && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
1806 var _hunk$lines4;
1807 (_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, _toConsumableArray(theirChanges));
1808 return;
1809 }
1810 } else if (arrayEqual(myChanges, theirChanges)) {
1811 var _hunk$lines5;
1812 (_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, _toConsumableArray(myChanges));
1813 return;
1814 }
1815 conflict(hunk, myChanges, theirChanges);
1816}
1817function removal(hunk, mine, their, swap) {
1818 var myChanges = collectChange(mine),
1819 theirChanges = collectContext(their, myChanges);
1820 if (theirChanges.merged) {
1821 var _hunk$lines6;
1822 (_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, _toConsumableArray(theirChanges.merged));
1823 } else {
1824 conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
1825 }
1826}
1827function conflict(hunk, mine, their) {
1828 hunk.conflict = true;
1829 hunk.lines.push({
1830 conflict: true,
1831 mine: mine,
1832 theirs: their
1833 });
1834}
1835function insertLeading(hunk, insert, their) {
1836 while (insert.offset < their.offset && insert.index < insert.lines.length) {
1837 var line = insert.lines[insert.index++];
1838 hunk.lines.push(line);
1839 insert.offset++;
1840 }
1841}
1842function insertTrailing(hunk, insert) {
1843 while (insert.index < insert.lines.length) {
1844 var line = insert.lines[insert.index++];
1845 hunk.lines.push(line);
1846 }
1847}
1848function collectChange(state) {
1849 var ret = [],
1850 operation = state.lines[state.index][0];
1851 while (state.index < state.lines.length) {
1852 var line = state.lines[state.index];
1853
1854 // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
1855 if (operation === '-' && line[0] === '+') {
1856 operation = '+';
1857 }
1858 if (operation === line[0]) {
1859 ret.push(line);
1860 state.index++;
1861 } else {
1862 break;
1863 }
1864 }
1865 return ret;
1866}
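// For example, with state.lines = ['-a', '-b', '+c', ' d'] and state.index = 0,
// collectChange returns ['-a', '-b', '+c']: the run of removals plus the additions that
// immediately follow them are treated as a single modify change.
//
// collectContext below walks `state` (the context-side lines) against a previously
// collected change block, returning {merged, changes} when the context can absorb the
// change and just the raw changes when it conflicts.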
1867function collectContext(state, matchChanges) {
1868 var changes = [],
1869 merged = [],
1870 matchIndex = 0,
1871 contextChanges = false,
1872 conflicted = false;
1873 while (matchIndex < matchChanges.length && state.index < state.lines.length) {
1874 var change = state.lines[state.index],
1875 match = matchChanges[matchIndex];
1876
1877 // Once we've hit our add, we are done
1878 if (match[0] === '+') {
1879 break;
1880 }
1881 contextChanges = contextChanges || change[0] !== ' ';
1882 merged.push(match);
1883 matchIndex++;
1884
1885 // Consume any additions in the other block as a conflict to attempt
1886 // to pull in the remaining context after this
1887 if (change[0] === '+') {
1888 conflicted = true;
1889 while (change[0] === '+') {
1890 changes.push(change);
1891 change = state.lines[++state.index];
1892 }
1893 }
1894 if (match.substr(1) === change.substr(1)) {
1895 changes.push(change);
1896 state.index++;
1897 } else {
1898 conflicted = true;
1899 }
1900 }
1901 if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
1902 conflicted = true;
1903 }
1904 if (conflicted) {
1905 return changes;
1906 }
1907 while (matchIndex < matchChanges.length) {
1908 merged.push(matchChanges[matchIndex++]);
1909 }
1910 return {
1911 merged: merged,
1912 changes: changes
1913 };
1914}
1915function allRemoves(changes) {
1916 return changes.reduce(function (prev, change) {
1917 return prev && change[0] === '-';
1918 }, true);
1919}
1920function skipRemoveSuperset(state, removeChanges, delta) {
1921 for (var i = 0; i < delta; i++) {
1922 var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
1923 if (state.lines[state.index + i] !== ' ' + changeContent) {
1924 return false;
1925 }
1926 }
1927 state.index += delta;
1928 return true;
1929}
1930function calcOldNewLineCount(lines) {
1931 var oldLines = 0;
1932 var newLines = 0;
1933 lines.forEach(function (line) {
1934 if (typeof line !== 'string') {
1935 var myCount = calcOldNewLineCount(line.mine);
1936 var theirCount = calcOldNewLineCount(line.theirs);
1937 if (oldLines !== undefined) {
1938 if (myCount.oldLines === theirCount.oldLines) {
1939 oldLines += myCount.oldLines;
1940 } else {
1941 oldLines = undefined;
1942 }
1943 }
1944 if (newLines !== undefined) {
1945 if (myCount.newLines === theirCount.newLines) {
1946 newLines += myCount.newLines;
1947 } else {
1948 newLines = undefined;
1949 }
1950 }
1951 } else {
1952 if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
1953 newLines++;
1954 }
1955 if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
1956 oldLines++;
1957 }
1958 }
1959 });
1960 return {
1961 oldLines: oldLines,
1962 newLines: newLines
1963 };
1964}
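// For example, calcOldNewLineCount([' a', '-b', '+c', '+d']) returns
// {oldLines: 2, newLines: 3}. Conflict entries ({mine, theirs}) are counted recursively,
// and a count becomes undefined when the two sides of a conflict disagree on it.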
1965
1966function reversePatch(structuredPatch) {
1967 if (Array.isArray(structuredPatch)) {
1968 return structuredPatch.map(reversePatch).reverse();
1969 }
1970 return _objectSpread2(_objectSpread2({}, structuredPatch), {}, {
1971 oldFileName: structuredPatch.newFileName,
1972 oldHeader: structuredPatch.newHeader,
1973 newFileName: structuredPatch.oldFileName,
1974 newHeader: structuredPatch.oldHeader,
1975 hunks: structuredPatch.hunks.map(function (hunk) {
1976 return {
1977 oldLines: hunk.newLines,
1978 oldStart: hunk.newStart,
1979 newLines: hunk.oldLines,
1980 newStart: hunk.oldStart,
1981 lines: hunk.lines.map(function (l) {
1982 if (l.startsWith('-')) {
1983 return "+".concat(l.slice(1));
1984 }
1985 if (l.startsWith('+')) {
1986 return "-".concat(l.slice(1));
1987 }
1988 return l;
1989 })
1990 };
1991 })
1992 });
1993}
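// Quick sketch: reversing a patch swaps old/new file names and headers, swaps each hunk's
// start positions and line counts, and flips '+'/'-' prefixes, so (illustratively):
//
//   var forward = structuredPatch('a.txt', 'b.txt', oldStr, newStr);
//   var backward = reversePatch(forward);
//   // applying `backward` to the new text should recover the old text
//
// Lines without a '+'/'-' prefix (context and "\ No newline at end of file" markers)
// are passed through unchanged.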
1994
1995// See: http://code.google.com/p/google-diff-match-patch/wiki/API
1996function convertChangesToDMP(changes) {
1997 var ret = [],
1998 change,
1999 operation;
2000 for (var i = 0; i < changes.length; i++) {
2001 change = changes[i];
2002 if (change.added) {
2003 operation = 1;
2004 } else if (change.removed) {
2005 operation = -1;
2006 } else {
2007 operation = 0;
2008 }
2009 ret.push([operation, change.value]);
2010 }
2011 return ret;
2012}
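// Illustrative output shape, using the change objects produced by the diff functions in
// this module (the exact split of values is an assumption):
//
//   convertChangesToDMP(diffWords('one two', 'one three'))
//   // => something like [[0, 'one '], [-1, 'two'], [1, 'three']]
//   //    where 1 = insertion, -1 = deletion, 0 = equality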
2013
2014function convertChangesToXML(changes) {
2015 var ret = [];
2016 for (var i = 0; i < changes.length; i++) {
2017 var change = changes[i];
2018 if (change.added) {
2019 ret.push('<ins>');
2020 } else if (change.removed) {
2021 ret.push('<del>');
2022 }
2023 ret.push(escapeHTML(change.value));
2024 if (change.added) {
2025 ret.push('</ins>');
2026 } else if (change.removed) {
2027 ret.push('</del>');
2028 }
2029 }
2030 return ret.join('');
2031}
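// Illustrative example (values are escaped via escapeHTML below; the exact tokenization
// of the assumed inputs may differ):
//
//   convertChangesToXML(diffWords('a < b', 'a > b'))
//   // => a string like 'a <del>&lt;</del><ins>&gt;</ins> b', with <ins>/<del> wrapping
//   //    added/removed values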
2032function escapeHTML(s) {
2033 var n = s;
2034 n = n.replace(/&/g, '&amp;');
2035 n = n.replace(/</g, '&lt;');
2036 n = n.replace(/>/g, '&gt;');
2037 n = n.replace(/"/g, '&quot;');
2038 return n;
2039}
2040
2041export { Diff, applyPatch, applyPatches, canonicalize, convertChangesToDMP, convertChangesToXML, createPatch, createTwoFilesPatch, diffArrays, diffChars, diffCss, diffJson, diffLines, diffSentences, diffTrimmedLines, diffWords, diffWordsWithSpace, formatPatch, merge, parsePatch, reversePatch, structuredPatch };