/*!

 diff v3.2.0

Software License Agreement (BSD License)

Copyright (c) 2009-2015, Kevin Decker <kpdecker@gmail.com>

All rights reserved.

Redistribution and use of this software in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above
  copyright notice, this list of conditions and the
  following disclaimer.

* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the
  following disclaimer in the documentation and/or other
  materials provided with the distribution.

* Neither the name of Kevin Decker nor the names of its
  contributors may be used to endorse or promote products
  derived from this software without specific prior
  written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@license
*/
!function(a,b){"object"==typeof exports&&"object"==typeof module?module.exports=b():"function"==typeof define&&define.amd?define([],b):"object"==typeof exports?exports.JsDiff=b():a.JsDiff=b()}(this,function(){/******/
return function(a){/******/
// The require function
/******/
function b(d){/******/
// Check if module is in cache
/******/
if(c[d])/******/
return c[d].exports;/******/
// Create a new module (and put it into the cache)
/******/
var e=c[d]={/******/
exports:{},/******/
id:d,/******/
loaded:!1};/******/
// Return the exports of the module
/******/
/******/
// Execute the module function
/******/
/******/
// Flag the module as loaded
/******/
return a[d].call(e.exports,e,e.exports,b),e.loaded=!0,e.exports}// webpackBootstrap
/******/
// The module cache
/******/
var c={};/******/
// Load entry module and return exports
/******/
/******/
// expose the modules object (__webpack_modules__)
/******/
/******/
// expose the module cache
/******/
/******/
// __webpack_public_path__
/******/
return b.m=a,b.c=c,b.p="",b(0)}([/* 0 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";/*istanbul ignore start*/
function d(a){return a&&a.__esModule?a:{"default":a}}b.__esModule=!0,b.canonicalize=b.convertChangesToXML=b.convertChangesToDMP=b.parsePatch=b.applyPatches=b.applyPatch=b.createPatch=b.createTwoFilesPatch=b.structuredPatch=b.diffArrays=b.diffJson=b.diffCss=b.diffSentences=b.diffTrimmedLines=b.diffLines=b.diffWordsWithSpace=b.diffWords=b.diffChars=b.Diff=void 0;/*istanbul ignore end*/
var/*istanbul ignore start*/e=c(1),f=d(e),/*istanbul ignore start*/g=c(2),/*istanbul ignore start*/h=c(3),/*istanbul ignore start*/i=c(5),/*istanbul ignore start*/j=c(6),/*istanbul ignore start*/k=c(7),/*istanbul ignore start*/l=c(8),/*istanbul ignore start*/m=c(9),/*istanbul ignore start*/n=c(10),/*istanbul ignore start*/o=c(11),/*istanbul ignore start*/p=c(13),/*istanbul ignore start*/q=c(14),/*istanbul ignore start*/r=c(15);b.Diff=f["default"],/*istanbul ignore start*/
b.diffChars=g.diffChars,/*istanbul ignore start*/
b.diffWords=h.diffWords,/*istanbul ignore start*/
b.diffWordsWithSpace=h.diffWordsWithSpace,/*istanbul ignore start*/
b.diffLines=i.diffLines,/*istanbul ignore start*/
b.diffTrimmedLines=i.diffTrimmedLines,/*istanbul ignore start*/
b.diffSentences=j.diffSentences,/*istanbul ignore start*/
b.diffCss=k.diffCss,/*istanbul ignore start*/
b.diffJson=l.diffJson,/*istanbul ignore start*/
b.diffArrays=m.diffArrays,/*istanbul ignore start*/
b.structuredPatch=p.structuredPatch,/*istanbul ignore start*/
b.createTwoFilesPatch=p.createTwoFilesPatch,/*istanbul ignore start*/
b.createPatch=p.createPatch,/*istanbul ignore start*/
b.applyPatch=n.applyPatch,/*istanbul ignore start*/
b.applyPatches=n.applyPatches,/*istanbul ignore start*/
b.parsePatch=o.parsePatch,/*istanbul ignore start*/
b.convertChangesToDMP=q.convertChangesToDMP,/*istanbul ignore start*/
b.convertChangesToXML=r.convertChangesToXML,/*istanbul ignore start*/
b.canonicalize=l.canonicalize},/* 1 */
/***/
function(a,b){/*istanbul ignore start*/
"use strict";function c(){}function d(a,b,c,d,e){for(var f=0,g=b.length,h=0,i=0;f<g;f++){var j=b[f];if(j.removed){
// Reverse add and remove so removes are output first to match common convention
// The diffing algorithm is tied to add then remove output and this is the simplest
// route to get the desired output with minimal overhead.
if(j.value=a.join(d.slice(i,i+j.count)),i+=j.count,f&&b[f-1].added){var k=b[f-1];b[f-1]=b[f],b[f]=k}}else{if(!j.added&&e){var l=c.slice(h,h+j.count);l=l.map(function(a,b){var c=d[i+b];return c.length>a.length?c:a}),j.value=a.join(l)}else j.value=a.join(c.slice(h,h+j.count));h+=j.count,
// Common case
j.added||(i+=j.count)}}
// Special case handle for when one terminal is ignored. For this case we merge the
// terminal into the prior string and drop the change.
var m=b[g-1];return g>1&&(m.added||m.removed)&&a.equals("",m.value)&&(b[g-2].value+=m.value,b.pop()),b}function e(a){return{newPos:a.newPos,components:a.components.slice(0)}}b.__esModule=!0,b["default"]=/*istanbul ignore end*/c,c.prototype={/*istanbul ignore start*/
/*istanbul ignore end*/
diff:function(a,b){function c(a){return h?(setTimeout(function(){h(void 0,a)},0),!0):a}
// Main worker method. Checks all permutations of a given edit length for acceptance.
function f(){for(var f=-1*l;f<=l;f+=2){var g=/*istanbul ignore start*/void 0,h=n[f-1],m=n[f+1],o=(m?m.newPos:0)-f;h&&(
// No one else is going to attempt to use this value, clear it
n[f-1]=void 0);var p=h&&h.newPos+1<j,q=m&&0<=o&&o<k;if(p||q){
// If we have hit the end of both strings, then we are done
if(
// Select the diagonal that we want to branch from. We select the prior
// path whose position in the new string is the farthest from the origin
// and does not pass the bounds of the diff graph
!p||q&&h.newPos<m.newPos?(g=e(m),i.pushComponent(g.components,void 0,!0)):(g=h,// No need to clone, we've pulled it from the list
g.newPos++,i.pushComponent(g.components,!0,void 0)),o=i.extractCommon(g,b,a,f),g.newPos+1>=j&&o+1>=k)return c(d(i,g.components,b,a,i.useLongestToken));
// Otherwise track this path as a potential candidate and continue.
n[f]=g}else
// If this path is a terminal then prune
n[f]=void 0}l++}/*istanbul ignore start*/
var/*istanbul ignore end*/g=arguments.length<=2||void 0===arguments[2]?{}:arguments[2],h=g.callback;"function"==typeof g&&(h=g,g={}),this.options=g;var i=this;
// Allow subclasses to massage the input prior to running
a=this.castInput(a),b=this.castInput(b),a=this.removeEmpty(this.tokenize(a)),b=this.removeEmpty(this.tokenize(b));var j=b.length,k=a.length,l=1,m=j+k,n=[{newPos:-1,components:[]}],o=this.extractCommon(n[0],b,a,0);if(n[0].newPos+1>=j&&o+1>=k)
// Identity per the equality and tokenizer
return c([{value:this.join(b),count:b.length}]);
// Performs the length of edit iteration. Is a bit fugly as this has to support the
// sync and async mode which is never fun. Loops over execEditLength until a value
// is produced.
if(h)!function q(){setTimeout(function(){
// This should not happen, but we want to be safe.
/* istanbul ignore next */
// This should not happen, but we want to be safe.
/* istanbul ignore next */
return l>m?h():void(f()||q())},0)}();else for(;l<=m;){var p=f();if(p)return p}},/*istanbul ignore start*/
/*istanbul ignore end*/
pushComponent:function(a,b,c){var d=a[a.length-1];d&&d.added===b&&d.removed===c?
// We need to clone here as the component clone operation is just
// a shallow array clone
a[a.length-1]={count:d.count+1,added:b,removed:c}:a.push({count:1,added:b,removed:c})},/*istanbul ignore start*/
/*istanbul ignore end*/
extractCommon:function(a,b,c,d){for(var e=b.length,f=c.length,g=a.newPos,h=g-d,i=0;g+1<e&&h+1<f&&this.equals(b[g+1],c[h+1]);)g++,h++,i++;return i&&a.components.push({count:i}),a.newPos=g,h},/*istanbul ignore start*/
/*istanbul ignore end*/
equals:function(a,b){return a===b},/*istanbul ignore start*/
/*istanbul ignore end*/
removeEmpty:function(a){for(var b=[],c=0;c<a.length;c++)a[c]&&b.push(a[c]);return b},/*istanbul ignore start*/
/*istanbul ignore end*/
castInput:function(a){return a},/*istanbul ignore start*/
/*istanbul ignore end*/
tokenize:function(a){return a.split("")},/*istanbul ignore start*/
/*istanbul ignore end*/
join:function(a){return a.join("")}}},/* 2 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){return h.diff(a,b,c)}b.__esModule=!0,b.characterDiff=void 0,b.diffChars=e;var/*istanbul ignore start*/f=c(1),g=d(f),h=/*istanbul ignore start*/b.characterDiff=new/*istanbul ignore start*/g["default"]},/* 3 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";/*istanbul ignore start*/
function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){var d=/*istanbul ignore start*/(0,i.generateOptions)(c,{ignoreWhitespace:!0});return l.diff(a,b,d)}function f(a,b,c){return l.diff(a,b,c)}b.__esModule=!0,b.wordDiff=void 0,b.diffWords=e,/*istanbul ignore start*/
b.diffWordsWithSpace=f;var/*istanbul ignore start*/g=c(1),h=d(g),/*istanbul ignore start*/i=c(4),j=/^[A-Za-z\xC0-\u02C6\u02C8-\u02D7\u02DE-\u02FF\u1E00-\u1EFF]+$/,k=/\S/,l=/*istanbul ignore start*/b.wordDiff=new/*istanbul ignore start*/h["default"];l.equals=function(a,b){return a===b||this.options.ignoreWhitespace&&!k.test(a)&&!k.test(b)},l.tokenize=function(a){
// Join the boundary splits that we do not consider to be boundaries. This is primarily the extended Latin character set.
for(var b=a.split(/(\s+|\b)/),c=0;c<b.length-1;c++)
// If we have an empty string in the next field and we have only word chars before and after, merge
!b[c+1]&&b[c+2]&&j.test(b[c])&&j.test(b[c+2])&&(b[c]+=b[c+2],b.splice(c+1,2),c--);return b}},/* 4 */
/***/
function(a,b){/*istanbul ignore start*/
"use strict";function c(a,b){if("function"==typeof a)b.callback=a;else if(a)for(var c in a)/* istanbul ignore else */
a.hasOwnProperty(c)&&(b[c]=a[c]);return b}b.__esModule=!0,b.generateOptions=c},/* 5 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";/*istanbul ignore start*/
function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){return j.diff(a,b,c)}function f(a,b,c){var d=/*istanbul ignore start*/(0,i.generateOptions)(c,{ignoreWhitespace:!0});return j.diff(a,b,d)}b.__esModule=!0,b.lineDiff=void 0,b.diffLines=e,/*istanbul ignore start*/
b.diffTrimmedLines=f;var/*istanbul ignore start*/g=c(1),h=d(g),/*istanbul ignore start*/i=c(4),j=/*istanbul ignore start*/b.lineDiff=new/*istanbul ignore start*/h["default"];j.tokenize=function(a){var b=[],c=a.split(/(\n|\r\n)/);
// Ignore the final empty token that occurs if the string ends with a new line
c[c.length-1]||c.pop();
// Merge the content and line separators into single tokens
for(var d=0;d<c.length;d++){var e=c[d];d%2&&!this.options.newlineIsToken?b[b.length-1]+=e:(this.options.ignoreWhitespace&&(e=e.trim()),b.push(e))}return b}},/* 6 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){return h.diff(a,b,c)}b.__esModule=!0,b.sentenceDiff=void 0,b.diffSentences=e;var/*istanbul ignore start*/f=c(1),g=d(f),h=/*istanbul ignore start*/b.sentenceDiff=new/*istanbul ignore start*/g["default"];h.tokenize=function(a){return a.split(/(\S.+?[.!?])(?=\s+|$)/)}},/* 7 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){return h.diff(a,b,c)}b.__esModule=!0,b.cssDiff=void 0,b.diffCss=e;var/*istanbul ignore start*/f=c(1),g=d(f),h=/*istanbul ignore start*/b.cssDiff=new/*istanbul ignore start*/g["default"];h.tokenize=function(a){return a.split(/([{}:;,]|\s+)/)}},/* 8 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";/*istanbul ignore start*/
function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){return l.diff(a,b,c)}
// This function handles the presence of circular references by bailing out when encountering an
// object that is already on the "stack" of items being processed.
function f(a,b,c){b=b||[],c=c||[];var d=/*istanbul ignore start*/void 0;for(d=0;d<b.length;d+=1)if(b[d]===a)return c[d];var e=/*istanbul ignore start*/void 0;if("[object Array]"===k.call(a)){for(b.push(a),e=new Array(a.length),c.push(e),d=0;d<a.length;d+=1)e[d]=f(a[d],b,c);return b.pop(),c.pop(),e}if(a&&a.toJSON&&(a=a.toJSON()),/*istanbul ignore start*/"object"===("undefined"==typeof/*istanbul ignore end*/a?"undefined":g(a))&&null!==a){b.push(a),e={},c.push(e);var h=[],i=/*istanbul ignore start*/void 0;for(i in a)/* istanbul ignore else */
a.hasOwnProperty(i)&&h.push(i);for(h.sort(),d=0;d<h.length;d+=1)i=h[d],e[i]=f(a[i],b,c);b.pop(),c.pop()}else e=a;return e}b.__esModule=!0,b.jsonDiff=void 0;var g="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(a){return typeof a}:function(a){return a&&"function"==typeof Symbol&&a.constructor===Symbol?"symbol":typeof a};b.diffJson=e,/*istanbul ignore start*/
b.canonicalize=f;var/*istanbul ignore start*/h=c(1),i=d(h),/*istanbul ignore start*/j=c(5),k=Object.prototype.toString,l=/*istanbul ignore start*/b.jsonDiff=new/*istanbul ignore start*/i["default"];
// Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
l.useLongestToken=!0,l.tokenize=/*istanbul ignore start*/j.lineDiff.tokenize,l.castInput=function(a){/*istanbul ignore start*/
var/*istanbul ignore end*/b=this.options.undefinedReplacement;return"string"==typeof a?a:JSON.stringify(f(a),function(a,c){return"undefined"==typeof c?b:c}," ")},l.equals=function(a,b){/*istanbul ignore start*/
return i["default"].prototype.equals(a.replace(/,([\r\n])/g,"$1"),b.replace(/,([\r\n])/g,"$1"))}},/* 9 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";function d(a){return a&&a.__esModule?a:{"default":a}}function e(a,b,c){return h.diff(a,b,c)}b.__esModule=!0,b.arrayDiff=void 0,b.diffArrays=e;var/*istanbul ignore start*/f=c(1),g=d(f),h=/*istanbul ignore start*/b.arrayDiff=new/*istanbul ignore start*/g["default"];h.tokenize=h.join=function(a){return a.slice()}},/* 10 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";function d(a){return a&&a.__esModule?a:{"default":a}}/*istanbul ignore end*/
function e(a,b){/**
* Checks if the hunk exactly fits on the provided location
*/
function c(a,b){for(var c=0;c<a.lines.length;c++){var d=a.lines[c],f=d[0],g=d.substr(1);if(" "===f||"-"===f){
// Context sanity check
if(!j(b+1,e[b],f,g)&&(k++,k>l))return!1;b++}}return!0}/*istanbul ignore start*/
var/*istanbul ignore end*/d=arguments.length<=2||void 0===arguments[2]?{}:arguments[2];if("string"==typeof b&&(b=/*istanbul ignore start*/(0,g.parsePatch)(b)),Array.isArray(b)){if(b.length>1)throw new Error("applyPatch only works with a single input.");b=b[0]}
// Search best fit offsets for each hunk based on the previous ones
for(var e=a.split(/\r\n|[\n\v\f\r\x85]/),f=a.match(/\r\n|[\n\v\f\r\x85]/g)||[],h=b.hunks,j=d.compareLine||function(a,b,c,d){/*istanbul ignore end*/
return b===d},k=0,l=d.fuzzFactor||0,m=0,n=0,o=/*istanbul ignore start*/void 0,p=/*istanbul ignore start*/void 0,q=0;q<h.length;q++){for(var r=h[q],s=e.length-r.oldLines,t=0,u=n+r.oldStart-1,v=/*istanbul ignore start*/(0,i["default"])(u,m,s);void 0!==t;t=v())if(c(r,u+t)){r.offset=n+=t;break}if(void 0===t)return!1;
// Set lower text limit to end of the current hunk, so next ones don't try
// to fit over already patched text
m=r.offset+r.oldStart+r.oldLines}
// Apply patch hunks
for(var w=0;w<h.length;w++){var x=h[w],y=x.offset+x.newStart-1;0==x.newLines&&y++;for(var z=0;z<x.lines.length;z++){var A=x.lines[z],B=A[0],C=A.substr(1),D=x.linedelimiters[z];if(" "===B)y++;else if("-"===B)e.splice(y,1),f.splice(y,1);else if("+"===B)e.splice(y,0,C),f.splice(y,0,D),y++;else if("\\"===B){var E=x.lines[z-1]?x.lines[z-1][0]:null;"+"===E?o=!0:"-"===E&&(p=!0)}}}
// Handle EOFNL insertion/removal
if(o)for(;!e[e.length-1];)e.pop(),f.pop();else p&&(e.push(""),f.push("\n"));for(var F=0;F<e.length-1;F++)e[F]=e[F]+f[F];return e.join("")}
// Wrapper that supports multiple file patches via callbacks.
function f(a,b){function c(){var f=a[d++];return f?void b.loadFile(f,function(a,d){if(a)return b.complete(a);var g=e(d,f,b);b.patched(f,g,function(a){return a?b.complete(a):void c()})}):b.complete()}"string"==typeof a&&(a=/*istanbul ignore start*/(0,g.parsePatch)(a));var d=0;c()}b.__esModule=!0,b.applyPatch=e,/*istanbul ignore start*/
b.applyPatches=f;var/*istanbul ignore start*/g=c(11),/*istanbul ignore start*/h=c(12),i=d(h)},/* 11 */
/***/
function(a,b){/*istanbul ignore start*/
"use strict";function c(a){function b(){var a={};
// Parse diff metadata
for(h.push(a);i<f.length;){var b=f[i];
// File header found, end parsing diff metadata
if(/^(\-\-\-|\+\+\+|@@)\s/.test(b))break;
// Diff index
var g=/^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(b);g&&(a.index=g[1]),i++}for(
// Parse file headers if they are defined. Unified diff requires them, but
// there are no technical issues with having an isolated hunk without a file header
c(a),c(a),
// Parse hunks
a.hunks=[];i<f.length;){var j=f[i];if(/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(j))break;if(/^@@/.test(j))a.hunks.push(d());else{if(j&&e.strict)
// Ignore unexpected content unless in strict mode
throw new Error("Unknown line "+(i+1)+" "+JSON.stringify(j));i++}}}
// Parses the --- and +++ headers, if none are found, no lines
// are consumed.
function c(a){var b=/^(---|\+\+\+)\s+([\S ]*)(?:\t(.*?)\s*)?$/,c=b.exec(f[i]);if(c){var d="---"===c[1]?"old":"new";a[d+"FileName"]=c[2],a[d+"Header"]=c[3],i++}}
// Parses a hunk
// This assumes that we are at the start of a hunk.
function d(){for(var a=i,b=f[i++],c=b.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/),d={oldStart:+c[1],oldLines:+c[2]||1,newStart:+c[3],newLines:+c[4]||1,lines:[],linedelimiters:[]},h=0,j=0;i<f.length&&!(0===f[i].indexOf("--- ")&&i+2<f.length&&0===f[i+1].indexOf("+++ ")&&0===f[i+2].indexOf("@@"));i++){var k=f[i][0];if("+"!==k&&"-"!==k&&" "!==k&&"\\"!==k)break;d.lines.push(f[i]),d.linedelimiters.push(g[i]||"\n"),"+"===k?h++:"-"===k?j++:" "===k&&(h++,j++)}
// Perform optional sanity checking
if(
// Handle the empty block count case
h||1!==d.newLines||(d.newLines=0),j||1!==d.oldLines||(d.oldLines=0),e.strict){if(h!==d.newLines)throw new Error("Added line count did not match for hunk at line "+(a+1));if(j!==d.oldLines)throw new Error("Removed line count did not match for hunk at line "+(a+1))}return d}for(/*istanbul ignore start*/
var/*istanbul ignore end*/e=arguments.length<=1||void 0===arguments[1]?{}:arguments[1],f=a.split(/\r\n|[\n\v\f\r\x85]/),g=a.match(/\r\n|[\n\v\f\r\x85]/g)||[],h=[],i=0;i<f.length;)b();return h}b.__esModule=!0,b.parsePatch=c},/* 12 */
/***/
function(a,b){/*istanbul ignore start*/
"use strict";b.__esModule=!0,b["default"]=/*istanbul ignore end*/function(a,b,c){var d=!0,e=!1,f=!1,g=1;return function h(){if(d&&!f){
// Check if trying to fit beyond text length, and if not, check it fits
// after offset location (or desired location on first iteration)
if(e?g++:d=!1,a+g<=c)return g;f=!0}if(!e)
// Check if trying to fit before text beginning, and if not, check it fits
// before offset location
// Check if trying to fit before text beginning, and if not, check it fits
// before offset location
return f||(d=!0),b<=a-g?-g++:(e=!0,h())}}},/* 13 */
/***/
function(a,b,c){/*istanbul ignore start*/
"use strict";/*istanbul ignore start*/
function d(a){if(Array.isArray(a)){for(var b=0,c=Array(a.length);b<a.length;b++)c[b]=a[b];return c}return Array.from(a)}/*istanbul ignore end*/
function e(a,b,c,e,f,g,i){// Append an empty value to make cleanup easier
function j(a){return a.map(function(a){return" "+a})}i||(i={}),"undefined"==typeof i.context&&(i.context=4);var k=/*istanbul ignore start*/(0,h.diffLines)(c,e,i);k.push({value:"",lines:[]});for(var l=[],m=0,n=0,o=[],p=1,q=1,r=function(/*istanbul ignore end*/a){var b=k[a],f=b.lines||b.value.replace(/\n$/,"").split("\n");if(b.lines=f,b.added||b.removed){/*istanbul ignore start*/
var g;/*istanbul ignore end*/
// If we have previous context, start with that
if(!m){var h=k[a-1];m=p,n=q,h&&(o=i.context>0?j(h.lines.slice(-i.context)):[],m-=o.length,n-=o.length)}
// Output our changes
/*istanbul ignore start*/
(g=/*istanbul ignore end*/o).push.apply(/*istanbul ignore start*/g,/*istanbul ignore start*/d(/*istanbul ignore end*/f.map(function(a){return(b.added?"+":"-")+a}))),
// Track the updated file position
b.added?q+=f.length:p+=f.length}else{
// Identical context lines. Track line changes
if(m)
// Close out any changes that have been output (or join overlapping)
if(f.length<=2*i.context&&a<k.length-2){/*istanbul ignore start*/
var r;/*istanbul ignore end*/
// Overlapping
/*istanbul ignore start*/
(r=/*istanbul ignore end*/o).push.apply(/*istanbul ignore start*/r,/*istanbul ignore start*/d(/*istanbul ignore end*/j(f)))}else{/*istanbul ignore start*/
var s,t=Math.min(f.length,i.context);/*istanbul ignore start*/
(s=/*istanbul ignore end*/o).push.apply(/*istanbul ignore start*/s,/*istanbul ignore start*/d(/*istanbul ignore end*/j(f.slice(0,t))));var u={oldStart:m,oldLines:p-m+t,newStart:n,newLines:q-n+t,lines:o};if(a>=k.length-2&&f.length<=i.context){
// EOF is inside this hunk
var v=/\n$/.test(c),w=/\n$/.test(e);0!=f.length||v?v&&w||o.push("\\ No newline at end of file"):
// special case: old has no eol and no trailing context; no-nl can end up before adds
o.splice(u.oldLines,0,"\\ No newline at end of file")}l.push(u),m=0,n=0,o=[]}p+=f.length,q+=f.length}},s=0;s<k.length;s++)/*istanbul ignore start*/
r(/*istanbul ignore end*/s);return{oldFileName:a,newFileName:b,oldHeader:f,newHeader:g,hunks:l}}function f(a,b,c,d,f,g,h){var i=e(a,b,c,d,f,g,h),j=[];a==b&&j.push("Index: "+a),j.push("==================================================================="),j.push("--- "+i.oldFileName+("undefined"==typeof i.oldHeader?"":"\t"+i.oldHeader)),j.push("+++ "+i.newFileName+("undefined"==typeof i.newHeader?"":"\t"+i.newHeader));for(var k=0;k<i.hunks.length;k++){var l=i.hunks[k];j.push("@@ -"+l.oldStart+","+l.oldLines+" +"+l.newStart+","+l.newLines+" @@"),j.push.apply(j,l.lines)}return j.join("\n")+"\n"}function g(a,b,c,d,e,g){return f(a,a,b,c,d,e,g)}b.__esModule=!0,b.structuredPatch=e,/*istanbul ignore start*/
b.createTwoFilesPatch=f,/*istanbul ignore start*/
b.createPatch=g;var/*istanbul ignore start*/h=c(5)},/* 14 */
/***/
function(a,b){/*istanbul ignore start*/
"use strict";
// See: http://code.google.com/p/google-diff-match-patch/wiki/API
function c(a){for(var b=[],c=/*istanbul ignore start*/void 0,d=/*istanbul ignore start*/void 0,e=0;e<a.length;e++)c=a[e],d=c.added?1:c.removed?-1:0,b.push([d,c.value]);return b}b.__esModule=!0,b.convertChangesToDMP=c},/* 15 */
/***/
function(a,b){/*istanbul ignore start*/
"use strict";function c(a){for(var b=[],c=0;c<a.length;c++){var e=a[c];e.added?b.push("<ins>"):e.removed&&b.push("<del>"),b.push(d(e.value)),e.added?b.push("</ins>"):e.removed&&b.push("</del>")}return b.join("")}function d(a){var b=a;return b=b.replace(/&/g,"&"),b=b.replace(/</g,"<"),b=b.replace(/>/g,">"),b=b.replace(/"/g,""")}b.__esModule=!0,b.convertChangesToXML=c}])});
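/*
 * Example usage (illustrative sketch, not part of the library build): the UMD
 * wrapper above exposes this API as a CommonJS module, an AMD module, or the
 * global `JsDiff`. Only exports defined in module 0 are used below (diffChars,
 * createPatch, applyPatch); the file name 'greeting.txt' and the sample strings
 * are invented for the example.
 *
 *   var changes = JsDiff.diffChars('beep boop', 'beep boob blah');
 *   changes.forEach(function (part) {
 *     // Each change object carries value/count plus added/removed flags.
 *     var prefix = part.added ? '+' : part.removed ? '-' : ' ';
 *     console.log(prefix + part.value);
 *   });
 *
 *   // Patches round-trip through createPatch/applyPatch.
 *   var patch = JsDiff.createPatch('greeting.txt', 'hello\n', 'hello world\n');
 *   var patched = JsDiff.applyPatch('hello\n', patch); // 'hello world\n', or false if a hunk fails to apply
 */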