7030 lines
222 KiB
JavaScript
7030 lines
222 KiB
JavaScript
|
exports.id = 905;
|
||
|
exports.ids = [905];
|
||
|
exports.modules = {
|
||
|
|
||
|
/***/ 55285:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
const Utils = __webpack_require__(85173);
|
||
|
const pth = __webpack_require__(85622);
|
||
|
const ZipEntry = __webpack_require__(47396);
|
||
|
const ZipFile = __webpack_require__(56333);
|
||
|
|
||
|
const get_Bool = (val, def) => (typeof val === "boolean" ? val : def);
|
||
|
const get_Str = (val, def) => (typeof val === "string" ? val : def);
|
||
|
|
||
|
const defaultOptions = {
|
||
|
// option "noSort" : if true it disables files sorting
|
||
|
noSort: false,
|
||
|
// read entries during load (initial loading may be slower)
|
||
|
readEntries: false,
|
||
|
// default method is none
|
||
|
method: Utils.Constants.NONE,
|
||
|
// file system
|
||
|
fs: null
|
||
|
};
|
||
|
|
||
|
module.exports = function (/**String*/ input, /** object */ options) {
|
||
|
let inBuffer = null;
|
||
|
|
||
|
// create object based default options, allowing them to be overwritten
|
||
|
const opts = Object.assign(Object.create(null), defaultOptions);
|
||
|
|
||
|
// test input variable
|
||
|
if (input && "object" === typeof input) {
|
||
|
// if value is not buffer we accept it to be object with options
|
||
|
if (!(input instanceof Uint8Array)) {
|
||
|
Object.assign(opts, input);
|
||
|
input = opts.input ? opts.input : undefined;
|
||
|
if (opts.input) delete opts.input;
|
||
|
}
|
||
|
|
||
|
// if input is buffer
|
||
|
if (Buffer.isBuffer(input)) {
|
||
|
inBuffer = input;
|
||
|
opts.method = Utils.Constants.BUFFER;
|
||
|
input = undefined;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
// assign options
|
||
|
Object.assign(opts, options);
|
||
|
|
||
|
// instanciate utils filesystem
|
||
|
const filetools = new Utils(opts);
|
||
|
|
||
|
// if input is file name we retrieve its content
|
||
|
if (input && "string" === typeof input) {
|
||
|
// load zip file
|
||
|
if (filetools.fs.existsSync(input)) {
|
||
|
opts.method = Utils.Constants.FILE;
|
||
|
opts.filename = input;
|
||
|
inBuffer = filetools.fs.readFileSync(input);
|
||
|
} else {
|
||
|
throw new Error(Utils.Errors.INVALID_FILENAME);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
// create variable
|
||
|
const _zip = new ZipFile(inBuffer, opts);
|
||
|
|
||
|
const { canonical, sanitize } = Utils;
|
||
|
|
||
|
function getEntry(/**Object*/ entry) {
|
||
|
if (entry && _zip) {
|
||
|
var item;
|
||
|
// If entry was given as a file name
|
||
|
if (typeof entry === "string") item = _zip.getEntry(entry);
|
||
|
// if entry was given as a ZipEntry object
|
||
|
if (typeof entry === "object" && typeof entry.entryName !== "undefined" && typeof entry.header !== "undefined") item = _zip.getEntry(entry.entryName);
|
||
|
|
||
|
if (item) {
|
||
|
return item;
|
||
|
}
|
||
|
}
|
||
|
return null;
|
||
|
}
|
||
|
|
||
|
function fixPath(zipPath) {
|
||
|
const { join, normalize, sep } = pth.posix;
|
||
|
// convert windows file separators and normalize
|
||
|
return join(".", normalize(sep + zipPath.split("\\").join(sep) + sep));
|
||
|
}
|
||
|
|
||
|
return {
|
||
|
/**
|
||
|
* Extracts the given entry from the archive and returns the content as a Buffer object
|
||
|
* @param entry ZipEntry object or String with the full path of the entry
|
||
|
*
|
||
|
* @return Buffer or Null in case of error
|
||
|
*/
|
||
|
readFile: function (/**Object*/ entry, /*String, Buffer*/ pass) {
|
||
|
var item = getEntry(entry);
|
||
|
return (item && item.getData(pass)) || null;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Asynchronous readFile
|
||
|
* @param entry ZipEntry object or String with the full path of the entry
|
||
|
* @param callback
|
||
|
*
|
||
|
* @return Buffer or Null in case of error
|
||
|
*/
|
||
|
readFileAsync: function (/**Object*/ entry, /**Function*/ callback) {
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
item.getDataAsync(callback);
|
||
|
} else {
|
||
|
callback(null, "getEntry failed for:" + entry);
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Extracts the given entry from the archive and returns the content as plain text in the given encoding
|
||
|
* @param entry ZipEntry object or String with the full path of the entry
|
||
|
* @param encoding Optional. If no encoding is specified utf8 is used
|
||
|
*
|
||
|
* @return String
|
||
|
*/
|
||
|
readAsText: function (/**Object*/ entry, /**String=*/ encoding) {
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
var data = item.getData();
|
||
|
if (data && data.length) {
|
||
|
return data.toString(encoding || "utf8");
|
||
|
}
|
||
|
}
|
||
|
return "";
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Asynchronous readAsText
|
||
|
* @param entry ZipEntry object or String with the full path of the entry
|
||
|
* @param callback
|
||
|
* @param encoding Optional. If no encoding is specified utf8 is used
|
||
|
*
|
||
|
* @return String
|
||
|
*/
|
||
|
readAsTextAsync: function (/**Object*/ entry, /**Function*/ callback, /**String=*/ encoding) {
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
item.getDataAsync(function (data, err) {
|
||
|
if (err) {
|
||
|
callback(data, err);
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
if (data && data.length) {
|
||
|
callback(data.toString(encoding || "utf8"));
|
||
|
} else {
|
||
|
callback("");
|
||
|
}
|
||
|
});
|
||
|
} else {
|
||
|
callback("");
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Remove the entry from the file or the entry and all it's nested directories and files if the given entry is a directory
|
||
|
*
|
||
|
* @param entry
|
||
|
*/
|
||
|
deleteFile: function (/**Object*/ entry) {
|
||
|
// @TODO: test deleteFile
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
_zip.deleteEntry(item.entryName);
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Adds a comment to the zip. The zip must be rewritten after adding the comment.
|
||
|
*
|
||
|
* @param comment
|
||
|
*/
|
||
|
addZipComment: function (/**String*/ comment) {
|
||
|
// @TODO: test addZipComment
|
||
|
_zip.comment = comment;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns the zip comment
|
||
|
*
|
||
|
* @return String
|
||
|
*/
|
||
|
getZipComment: function () {
|
||
|
return _zip.comment || "";
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Adds a comment to a specified zipEntry. The zip must be rewritten after adding the comment
|
||
|
* The comment cannot exceed 65535 characters in length
|
||
|
*
|
||
|
* @param entry
|
||
|
* @param comment
|
||
|
*/
|
||
|
addZipEntryComment: function (/**Object*/ entry, /**String*/ comment) {
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
item.comment = comment;
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns the comment of the specified entry
|
||
|
*
|
||
|
* @param entry
|
||
|
* @return String
|
||
|
*/
|
||
|
getZipEntryComment: function (/**Object*/ entry) {
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
return item.comment || "";
|
||
|
}
|
||
|
return "";
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Updates the content of an existing entry inside the archive. The zip must be rewritten after updating the content
|
||
|
*
|
||
|
* @param entry
|
||
|
* @param content
|
||
|
*/
|
||
|
updateFile: function (/**Object*/ entry, /**Buffer*/ content) {
|
||
|
var item = getEntry(entry);
|
||
|
if (item) {
|
||
|
item.setData(content);
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Adds a file from the disk to the archive
|
||
|
*
|
||
|
* @param localPath File to add to zip
|
||
|
* @param zipPath Optional path inside the zip
|
||
|
* @param zipName Optional name for the file
|
||
|
*/
|
||
|
addLocalFile: function (/**String*/ localPath, /**String=*/ zipPath, /**String=*/ zipName, /**String*/ comment) {
|
||
|
if (filetools.fs.existsSync(localPath)) {
|
||
|
// fix ZipPath
|
||
|
zipPath = zipPath ? fixPath(zipPath) : "";
|
||
|
|
||
|
// p - local file name
|
||
|
var p = localPath.split("\\").join("/").split("/").pop();
|
||
|
|
||
|
// add file name into zippath
|
||
|
zipPath += zipName ? zipName : p;
|
||
|
|
||
|
// read file attributes
|
||
|
const _attr = filetools.fs.statSync(localPath);
|
||
|
|
||
|
// add file into zip file
|
||
|
this.addFile(zipPath, filetools.fs.readFileSync(localPath), comment, _attr);
|
||
|
} else {
|
||
|
throw new Error(Utils.Errors.FILE_NOT_FOUND.replace("%s", localPath));
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Adds a local directory and all its nested files and directories to the archive
|
||
|
*
|
||
|
* @param localPath
|
||
|
* @param zipPath optional path inside zip
|
||
|
* @param filter optional RegExp or Function if files match will
|
||
|
* be included.
|
||
|
*/
|
||
|
addLocalFolder: function (/**String*/ localPath, /**String=*/ zipPath, /**=RegExp|Function*/ filter) {
|
||
|
// Prepare filter
|
||
|
if (filter instanceof RegExp) {
|
||
|
// if filter is RegExp wrap it
|
||
|
filter = (function (rx) {
|
||
|
return function (filename) {
|
||
|
return rx.test(filename);
|
||
|
};
|
||
|
})(filter);
|
||
|
} else if ("function" !== typeof filter) {
|
||
|
// if filter is not function we will replace it
|
||
|
filter = function () {
|
||
|
return true;
|
||
|
};
|
||
|
}
|
||
|
|
||
|
// fix ZipPath
|
||
|
zipPath = zipPath ? fixPath(zipPath) : "";
|
||
|
|
||
|
// normalize the path first
|
||
|
localPath = pth.normalize(localPath);
|
||
|
|
||
|
if (filetools.fs.existsSync(localPath)) {
|
||
|
const items = filetools.findFiles(localPath);
|
||
|
const self = this;
|
||
|
|
||
|
if (items.length) {
|
||
|
items.forEach(function (filepath) {
|
||
|
var p = pth.relative(localPath, filepath).split("\\").join("/"); //windows fix
|
||
|
if (filter(p)) {
|
||
|
var stats = filetools.fs.statSync(filepath);
|
||
|
if (stats.isFile()) {
|
||
|
self.addFile(zipPath + p, filetools.fs.readFileSync(filepath), "", stats);
|
||
|
} else {
|
||
|
self.addFile(zipPath + p + "/", Buffer.alloc(0), "", stats);
|
||
|
}
|
||
|
}
|
||
|
});
|
||
|
}
|
||
|
} else {
|
||
|
throw new Error(Utils.Errors.FILE_NOT_FOUND.replace("%s", localPath));
|
||
|
}
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Asynchronous addLocalFile
|
||
|
* @param localPath
|
||
|
* @param callback
|
||
|
* @param zipPath optional path inside zip
|
||
|
* @param filter optional RegExp or Function if files match will
|
||
|
* be included.
|
||
|
*/
|
||
|
addLocalFolderAsync: function (/*String*/ localPath, /*Function*/ callback, /*String*/ zipPath, /*RegExp|Function*/ filter) {
|
||
|
if (filter instanceof RegExp) {
|
||
|
filter = (function (rx) {
|
||
|
return function (filename) {
|
||
|
return rx.test(filename);
|
||
|
};
|
||
|
})(filter);
|
||
|
} else if ("function" !== typeof filter) {
|
||
|
filter = function () {
|
||
|
return true;
|
||
|
};
|
||
|
}
|
||
|
|
||
|
// fix ZipPath
|
||
|
zipPath = zipPath ? fixPath(zipPath) : "";
|
||
|
|
||
|
// normalize the path first
|
||
|
localPath = pth.normalize(localPath);
|
||
|
|
||
|
var self = this;
|
||
|
filetools.fs.open(localPath, "r", function (err) {
|
||
|
if (err && err.code === "ENOENT") {
|
||
|
callback(undefined, Utils.Errors.FILE_NOT_FOUND.replace("%s", localPath));
|
||
|
} else if (err) {
|
||
|
callback(undefined, err);
|
||
|
} else {
|
||
|
var items = filetools.findFiles(localPath);
|
||
|
var i = -1;
|
||
|
|
||
|
var next = function () {
|
||
|
i += 1;
|
||
|
if (i < items.length) {
|
||
|
var filepath = items[i];
|
||
|
var p = pth.relative(localPath, filepath).split("\\").join("/"); //windows fix
|
||
|
p = p
|
||
|
.normalize("NFD")
|
||
|
.replace(/[\u0300-\u036f]/g, "")
|
||
|
.replace(/[^\x20-\x7E]/g, ""); // accent fix
|
||
|
if (filter(p)) {
|
||
|
filetools.fs.stat(filepath, function (er0, stats) {
|
||
|
if (er0) callback(undefined, er0);
|
||
|
if (stats.isFile()) {
|
||
|
filetools.fs.readFile(filepath, function (er1, data) {
|
||
|
if (er1) {
|
||
|
callback(undefined, er1);
|
||
|
} else {
|
||
|
self.addFile(zipPath + p, data, "", stats);
|
||
|
next();
|
||
|
}
|
||
|
});
|
||
|
} else {
|
||
|
self.addFile(zipPath + p + "/", Buffer.alloc(0), "", stats);
|
||
|
next();
|
||
|
}
|
||
|
});
|
||
|
} else {
|
||
|
next();
|
||
|
}
|
||
|
} else {
|
||
|
callback(true, undefined);
|
||
|
}
|
||
|
};
|
||
|
|
||
|
next();
|
||
|
}
|
||
|
});
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
*
|
||
|
* @param {string} localPath - path where files will be extracted
|
||
|
* @param {object} props - optional properties
|
||
|
* @param {string} props.zipPath - optional path inside zip
|
||
|
* @param {regexp, function} props.filter - RegExp or Function if files match will be included.
|
||
|
*/
|
||
|
addLocalFolderPromise: function (/*String*/ localPath, /* object */ props) {
|
||
|
return new Promise((resolve, reject) => {
|
||
|
const { filter, zipPath } = Object.assign({}, props);
|
||
|
this.addLocalFolderAsync(
|
||
|
localPath,
|
||
|
(done, err) => {
|
||
|
if (err) reject(err);
|
||
|
if (done) resolve(this);
|
||
|
},
|
||
|
zipPath,
|
||
|
filter
|
||
|
);
|
||
|
});
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Allows you to create a entry (file or directory) in the zip file.
|
||
|
* If you want to create a directory the entryName must end in / and a null buffer should be provided.
|
||
|
* Comment and attributes are optional
|
||
|
*
|
||
|
* @param {string} entryName
|
||
|
* @param {Buffer | string} content - file content as buffer or utf8 coded string
|
||
|
* @param {string} comment - file comment
|
||
|
* @param {number | object} attr - number as unix file permissions, object as filesystem Stats object
|
||
|
*/
|
||
|
addFile: function (/**String*/ entryName, /**Buffer*/ content, /**String*/ comment, /**Number*/ attr) {
|
||
|
let entry = getEntry(entryName);
|
||
|
const update = entry != null;
|
||
|
|
||
|
// prepare new entry
|
||
|
if (!update) {
|
||
|
entry = new ZipEntry();
|
||
|
entry.entryName = entryName;
|
||
|
}
|
||
|
entry.comment = comment || "";
|
||
|
|
||
|
const isStat = "object" === typeof attr && attr instanceof filetools.fs.Stats;
|
||
|
|
||
|
// last modification time from file stats
|
||
|
if (isStat) {
|
||
|
entry.header.time = attr.mtime;
|
||
|
}
|
||
|
|
||
|
// Set file attribute
|
||
|
var fileattr = entry.isDirectory ? 0x10 : 0; // (MS-DOS directory flag)
|
||
|
|
||
|
// extended attributes field for Unix
|
||
|
if (!Utils.isWin) {
|
||
|
// set file type either S_IFDIR / S_IFREG
|
||
|
let unix = entry.isDirectory ? 0x4000 : 0x8000;
|
||
|
|
||
|
if (isStat) {
|
||
|
// File attributes from file stats
|
||
|
unix |= 0xfff & attr.mode;
|
||
|
} else if ("number" === typeof attr) {
|
||
|
// attr from given attr values
|
||
|
unix |= 0xfff & attr;
|
||
|
} else {
|
||
|
// Default values:
|
||
|
unix |= entry.isDirectory ? 0o755 : 0o644; // permissions (drwxr-xr-x) or (-r-wr--r--)
|
||
|
}
|
||
|
|
||
|
fileattr = (fileattr | (unix << 16)) >>> 0; // add attributes
|
||
|
}
|
||
|
|
||
|
entry.attr = fileattr;
|
||
|
|
||
|
entry.setData(content);
|
||
|
if (!update) _zip.setEntry(entry);
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns an array of ZipEntry objects representing the files and folders inside the archive
|
||
|
*
|
||
|
* @return Array
|
||
|
*/
|
||
|
getEntries: function () {
|
||
|
return _zip ? _zip.entries : [];
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns a ZipEntry object representing the file or folder specified by ``name``.
|
||
|
*
|
||
|
* @param name
|
||
|
* @return ZipEntry
|
||
|
*/
|
||
|
getEntry: function (/**String*/ name) {
|
||
|
return getEntry(name);
|
||
|
},
|
||
|
|
||
|
getEntryCount: function () {
|
||
|
return _zip.getEntryCount();
|
||
|
},
|
||
|
|
||
|
forEach: function (callback) {
|
||
|
return _zip.forEach(callback);
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Extracts the given entry to the given targetPath
|
||
|
* If the entry is a directory inside the archive, the entire directory and it's subdirectories will be extracted
|
||
|
*
|
||
|
* @param entry ZipEntry object or String with the full path of the entry
|
||
|
* @param targetPath Target folder where to write the file
|
||
|
* @param maintainEntryPath If maintainEntryPath is true and the entry is inside a folder, the entry folder
|
||
|
* will be created in targetPath as well. Default is TRUE
|
||
|
* @param overwrite If the file already exists at the target path, the file will be overwriten if this is true.
|
||
|
* Default is FALSE
|
||
|
* @param keepOriginalPermission The file will be set as the permission from the entry if this is true.
|
||
|
* Default is FALSE
|
||
|
* @param outFileName String If set will override the filename of the extracted file (Only works if the entry is a file)
|
||
|
*
|
||
|
* @return Boolean
|
||
|
*/
|
||
|
extractEntryTo: function (
|
||
|
/**Object*/ entry,
|
||
|
/**String*/ targetPath,
|
||
|
/**Boolean*/ maintainEntryPath,
|
||
|
/**Boolean*/ overwrite,
|
||
|
/**Boolean*/ keepOriginalPermission,
|
||
|
/**String**/ outFileName
|
||
|
) {
|
||
|
overwrite = get_Bool(overwrite, false);
|
||
|
keepOriginalPermission = get_Bool(keepOriginalPermission, false);
|
||
|
maintainEntryPath = get_Bool(maintainEntryPath, true);
|
||
|
outFileName = get_Str(outFileName, get_Str(keepOriginalPermission, undefined));
|
||
|
|
||
|
var item = getEntry(entry);
|
||
|
if (!item) {
|
||
|
throw new Error(Utils.Errors.NO_ENTRY);
|
||
|
}
|
||
|
|
||
|
var entryName = canonical(item.entryName);
|
||
|
|
||
|
var target = sanitize(targetPath, outFileName && !item.isDirectory ? outFileName : maintainEntryPath ? entryName : pth.basename(entryName));
|
||
|
|
||
|
if (item.isDirectory) {
|
||
|
var children = _zip.getEntryChildren(item);
|
||
|
children.forEach(function (child) {
|
||
|
if (child.isDirectory) return;
|
||
|
var content = child.getData();
|
||
|
if (!content) {
|
||
|
throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
|
||
|
}
|
||
|
var name = canonical(child.entryName);
|
||
|
var childName = sanitize(targetPath, maintainEntryPath ? name : pth.basename(name));
|
||
|
// The reverse operation for attr depend on method addFile()
|
||
|
const fileAttr = keepOriginalPermission ? child.header.fileAttr : undefined;
|
||
|
filetools.writeFileTo(childName, content, overwrite, fileAttr);
|
||
|
});
|
||
|
return true;
|
||
|
}
|
||
|
|
||
|
var content = item.getData();
|
||
|
if (!content) throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
|
||
|
|
||
|
if (filetools.fs.existsSync(target) && !overwrite) {
|
||
|
throw new Error(Utils.Errors.CANT_OVERRIDE);
|
||
|
}
|
||
|
// The reverse operation for attr depend on method addFile()
|
||
|
const fileAttr = keepOriginalPermission ? entry.header.fileAttr : undefined;
|
||
|
filetools.writeFileTo(target, content, overwrite, fileAttr);
|
||
|
|
||
|
return true;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Test the archive
|
||
|
*
|
||
|
*/
|
||
|
test: function (pass) {
|
||
|
if (!_zip) {
|
||
|
return false;
|
||
|
}
|
||
|
|
||
|
for (var entry in _zip.entries) {
|
||
|
try {
|
||
|
if (entry.isDirectory) {
|
||
|
continue;
|
||
|
}
|
||
|
var content = _zip.entries[entry].getData(pass);
|
||
|
if (!content) {
|
||
|
return false;
|
||
|
}
|
||
|
} catch (err) {
|
||
|
return false;
|
||
|
}
|
||
|
}
|
||
|
return true;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Extracts the entire archive to the given location
|
||
|
*
|
||
|
* @param targetPath Target location
|
||
|
* @param overwrite If the file already exists at the target path, the file will be overwriten if this is true.
|
||
|
* Default is FALSE
|
||
|
* @param keepOriginalPermission The file will be set as the permission from the entry if this is true.
|
||
|
* Default is FALSE
|
||
|
*/
|
||
|
extractAllTo: function (/**String*/ targetPath, /**Boolean*/ overwrite, /**Boolean*/ keepOriginalPermission, /*String, Buffer*/ pass) {
|
||
|
overwrite = get_Bool(overwrite, false);
|
||
|
pass = get_Str(keepOriginalPermission, pass);
|
||
|
keepOriginalPermission = get_Bool(keepOriginalPermission, false);
|
||
|
if (!_zip) {
|
||
|
throw new Error(Utils.Errors.NO_ZIP);
|
||
|
}
|
||
|
_zip.entries.forEach(function (entry) {
|
||
|
var entryName = sanitize(targetPath, canonical(entry.entryName.toString()));
|
||
|
if (entry.isDirectory) {
|
||
|
filetools.makeDir(entryName);
|
||
|
return;
|
||
|
}
|
||
|
var content = entry.getData(pass);
|
||
|
if (!content) {
|
||
|
throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
|
||
|
}
|
||
|
// The reverse operation for attr depend on method addFile()
|
||
|
const fileAttr = keepOriginalPermission ? entry.header.fileAttr : undefined;
|
||
|
filetools.writeFileTo(entryName, content, overwrite, fileAttr);
|
||
|
try {
|
||
|
filetools.fs.utimesSync(entryName, entry.header.time, entry.header.time);
|
||
|
} catch (err) {
|
||
|
throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
|
||
|
}
|
||
|
});
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Asynchronous extractAllTo
|
||
|
*
|
||
|
* @param targetPath Target location
|
||
|
* @param overwrite If the file already exists at the target path, the file will be overwriten if this is true.
|
||
|
* Default is FALSE
|
||
|
* @param keepOriginalPermission The file will be set as the permission from the entry if this is true.
|
||
|
* Default is FALSE
|
||
|
* @param callback The callback will be executed when all entries are extracted successfully or any error is thrown.
|
||
|
*/
|
||
|
extractAllToAsync: function (/**String*/ targetPath, /**Boolean*/ overwrite, /**Boolean*/ keepOriginalPermission, /**Function*/ callback) {
|
||
|
if (!callback) {
|
||
|
callback = function () {};
|
||
|
}
|
||
|
overwrite = get_Bool(overwrite, false);
|
||
|
if (typeof keepOriginalPermission === "function" && !callback) callback = keepOriginalPermission;
|
||
|
keepOriginalPermission = get_Bool(keepOriginalPermission, false);
|
||
|
if (!_zip) {
|
||
|
callback(new Error(Utils.Errors.NO_ZIP));
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
targetPath = pth.resolve(targetPath);
|
||
|
// convert entryName to
|
||
|
const getPath = (entry) => sanitize(targetPath, pth.normalize(canonical(entry.entryName.toString())));
|
||
|
const getError = (msg, file) => new Error(msg + ': "' + file + '"');
|
||
|
|
||
|
// separate directories from files
|
||
|
const dirEntries = [];
|
||
|
const fileEntries = new Set();
|
||
|
_zip.entries.forEach((e) => {
|
||
|
if (e.isDirectory) {
|
||
|
dirEntries.push(e);
|
||
|
} else {
|
||
|
fileEntries.add(e);
|
||
|
}
|
||
|
});
|
||
|
|
||
|
// Create directory entries first synchronously
|
||
|
// this prevents race condition and assures folders are there before writing files
|
||
|
for (const entry of dirEntries) {
|
||
|
const dirPath = getPath(entry);
|
||
|
// The reverse operation for attr depend on method addFile()
|
||
|
const dirAttr = keepOriginalPermission ? entry.header.fileAttr : undefined;
|
||
|
try {
|
||
|
filetools.makeDir(dirPath);
|
||
|
if (dirAttr) filetools.fs.chmodSync(dirPath, dirAttr);
|
||
|
// in unix timestamp will change if files are later added to folder, but still
|
||
|
filetools.fs.utimesSync(dirPath, entry.header.time, entry.header.time);
|
||
|
} catch (er) {
|
||
|
callback(getError("Unable to create folder", dirPath));
|
||
|
}
|
||
|
}
|
||
|
|
||
|
// callback wrapper, for some house keeping
|
||
|
const done = () => {
|
||
|
if (fileEntries.size === 0) {
|
||
|
callback();
|
||
|
}
|
||
|
};
|
||
|
|
||
|
// Extract file entries asynchronously
|
||
|
for (const entry of fileEntries.values()) {
|
||
|
const entryName = pth.normalize(canonical(entry.entryName.toString()));
|
||
|
const filePath = sanitize(targetPath, entryName);
|
||
|
entry.getDataAsync(function (content, err_1) {
|
||
|
if (err_1) {
|
||
|
callback(new Error(err_1));
|
||
|
return;
|
||
|
}
|
||
|
if (!content) {
|
||
|
callback(new Error(Utils.Errors.CANT_EXTRACT_FILE));
|
||
|
} else {
|
||
|
// The reverse operation for attr depend on method addFile()
|
||
|
const fileAttr = keepOriginalPermission ? entry.header.fileAttr : undefined;
|
||
|
filetools.writeFileToAsync(filePath, content, overwrite, fileAttr, function (succ) {
|
||
|
if (!succ) {
|
||
|
callback(getError("Unable to write file", filePath));
|
||
|
return;
|
||
|
}
|
||
|
filetools.fs.utimes(filePath, entry.header.time, entry.header.time, function (err_2) {
|
||
|
if (err_2) {
|
||
|
callback(getError("Unable to set times", filePath));
|
||
|
return;
|
||
|
}
|
||
|
fileEntries.delete(entry);
|
||
|
// call the callback if it was last entry
|
||
|
done();
|
||
|
});
|
||
|
});
|
||
|
}
|
||
|
});
|
||
|
}
|
||
|
// call the callback if fileEntries was empty
|
||
|
done();
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Writes the newly created zip file to disk at the specified location or if a zip was opened and no ``targetFileName`` is provided, it will overwrite the opened zip
|
||
|
*
|
||
|
* @param targetFileName
|
||
|
* @param callback
|
||
|
*/
|
||
|
writeZip: function (/**String*/ targetFileName, /**Function*/ callback) {
|
||
|
if (arguments.length === 1) {
|
||
|
if (typeof targetFileName === "function") {
|
||
|
callback = targetFileName;
|
||
|
targetFileName = "";
|
||
|
}
|
||
|
}
|
||
|
|
||
|
if (!targetFileName && opts.filename) {
|
||
|
targetFileName = opts.filename;
|
||
|
}
|
||
|
if (!targetFileName) return;
|
||
|
|
||
|
var zipData = _zip.compressToBuffer();
|
||
|
if (zipData) {
|
||
|
var ok = filetools.writeFileTo(targetFileName, zipData, true);
|
||
|
if (typeof callback === "function") callback(!ok ? new Error("failed") : null, "");
|
||
|
}
|
||
|
},
|
||
|
|
||
|
writeZipPromise: function (/**String*/ targetFileName, /* object */ props) {
|
||
|
const { overwrite, perm } = Object.assign({ overwrite: true }, props);
|
||
|
|
||
|
return new Promise((resolve, reject) => {
|
||
|
// find file name
|
||
|
if (!targetFileName && opts.filename) targetFileName = opts.filename;
|
||
|
if (!targetFileName) reject("ADM-ZIP: ZIP File Name Missing");
|
||
|
|
||
|
this.toBufferPromise().then((zipData) => {
|
||
|
const ret = (done) => (done ? resolve(done) : reject("ADM-ZIP: Wasn't able to write zip file"));
|
||
|
filetools.writeFileToAsync(targetFileName, zipData, overwrite, perm, ret);
|
||
|
}, reject);
|
||
|
});
|
||
|
},
|
||
|
|
||
|
toBufferPromise: function () {
|
||
|
return new Promise((resolve, reject) => {
|
||
|
_zip.toAsyncBuffer(resolve, reject);
|
||
|
});
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns the content of the entire zip file as a Buffer object
|
||
|
*
|
||
|
* @return Buffer
|
||
|
*/
|
||
|
toBuffer: function (/**Function=*/ onSuccess, /**Function=*/ onFail, /**Function=*/ onItemStart, /**Function=*/ onItemEnd) {
|
||
|
this.valueOf = 2;
|
||
|
if (typeof onSuccess === "function") {
|
||
|
_zip.toAsyncBuffer(onSuccess, onFail, onItemStart, onItemEnd);
|
||
|
return null;
|
||
|
}
|
||
|
return _zip.compressToBuffer();
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 42907:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
var Utils = __webpack_require__(85173),
|
||
|
Constants = Utils.Constants;
|
||
|
|
||
|
/* The central directory file header */
|
||
|
module.exports = function () {
|
||
|
var _verMade = 20, // v2.0
|
||
|
_version = 10, // v1.0
|
||
|
_flags = 0,
|
||
|
_method = 0,
|
||
|
_time = 0,
|
||
|
_crc = 0,
|
||
|
_compressedSize = 0,
|
||
|
_size = 0,
|
||
|
_fnameLen = 0,
|
||
|
_extraLen = 0,
|
||
|
_comLen = 0,
|
||
|
_diskStart = 0,
|
||
|
_inattr = 0,
|
||
|
_attr = 0,
|
||
|
_offset = 0;
|
||
|
|
||
|
_verMade |= Utils.isWin ? 0x0a00 : 0x0300;
|
||
|
|
||
|
// Set EFS flag since filename and comment fields are all by default encoded using UTF-8.
|
||
|
// Without it file names may be corrupted for other apps when file names use unicode chars
|
||
|
_flags |= Constants.FLG_EFS;
|
||
|
|
||
|
var _dataHeader = {};
|
||
|
|
||
|
function setTime(val) {
|
||
|
val = new Date(val);
|
||
|
_time =
|
||
|
(((val.getFullYear() - 1980) & 0x7f) << 25) | // b09-16 years from 1980
|
||
|
((val.getMonth() + 1) << 21) | // b05-08 month
|
||
|
(val.getDate() << 16) | // b00-04 hour
|
||
|
// 2 bytes time
|
||
|
(val.getHours() << 11) | // b11-15 hour
|
||
|
(val.getMinutes() << 5) | // b05-10 minute
|
||
|
(val.getSeconds() >> 1); // b00-04 seconds divided by 2
|
||
|
}
|
||
|
|
||
|
setTime(+new Date());
|
||
|
|
||
|
return {
|
||
|
get made() {
|
||
|
return _verMade;
|
||
|
},
|
||
|
set made(val) {
|
||
|
_verMade = val;
|
||
|
},
|
||
|
|
||
|
get version() {
|
||
|
return _version;
|
||
|
},
|
||
|
set version(val) {
|
||
|
_version = val;
|
||
|
},
|
||
|
|
||
|
get flags() {
|
||
|
return _flags;
|
||
|
},
|
||
|
set flags(val) {
|
||
|
_flags = val;
|
||
|
},
|
||
|
|
||
|
get method() {
|
||
|
return _method;
|
||
|
},
|
||
|
set method(val) {
|
||
|
switch (val) {
|
||
|
case Constants.STORED:
|
||
|
this.version = 10;
|
||
|
case Constants.DEFLATED:
|
||
|
default:
|
||
|
this.version = 20;
|
||
|
}
|
||
|
_method = val;
|
||
|
},
|
||
|
|
||
|
get time() {
|
||
|
return new Date(((_time >> 25) & 0x7f) + 1980, ((_time >> 21) & 0x0f) - 1, (_time >> 16) & 0x1f, (_time >> 11) & 0x1f, (_time >> 5) & 0x3f, (_time & 0x1f) << 1);
|
||
|
},
|
||
|
set time(val) {
|
||
|
setTime(val);
|
||
|
},
|
||
|
|
||
|
get crc() {
|
||
|
return _crc;
|
||
|
},
|
||
|
set crc(val) {
|
||
|
_crc = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
get compressedSize() {
|
||
|
return _compressedSize;
|
||
|
},
|
||
|
set compressedSize(val) {
|
||
|
_compressedSize = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
get size() {
|
||
|
return _size;
|
||
|
},
|
||
|
set size(val) {
|
||
|
_size = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
get fileNameLength() {
|
||
|
return _fnameLen;
|
||
|
},
|
||
|
set fileNameLength(val) {
|
||
|
_fnameLen = val;
|
||
|
},
|
||
|
|
||
|
get extraLength() {
|
||
|
return _extraLen;
|
||
|
},
|
||
|
set extraLength(val) {
|
||
|
_extraLen = val;
|
||
|
},
|
||
|
|
||
|
get commentLength() {
|
||
|
return _comLen;
|
||
|
},
|
||
|
set commentLength(val) {
|
||
|
_comLen = val;
|
||
|
},
|
||
|
|
||
|
get diskNumStart() {
|
||
|
return _diskStart;
|
||
|
},
|
||
|
set diskNumStart(val) {
|
||
|
_diskStart = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
get inAttr() {
|
||
|
return _inattr;
|
||
|
},
|
||
|
set inAttr(val) {
|
||
|
_inattr = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
get attr() {
|
||
|
return _attr;
|
||
|
},
|
||
|
set attr(val) {
|
||
|
_attr = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
// get Unix file permissions
|
||
|
get fileAttr() {
|
||
|
return _attr ? (((_attr >>> 0) | 0) >> 16) & 0xfff : 0;
|
||
|
},
|
||
|
|
||
|
get offset() {
|
||
|
return _offset;
|
||
|
},
|
||
|
set offset(val) {
|
||
|
_offset = Math.max(0, val) >>> 0;
|
||
|
},
|
||
|
|
||
|
get encripted() {
|
||
|
return (_flags & 1) === 1;
|
||
|
},
|
||
|
|
||
|
get entryHeaderSize() {
|
||
|
return Constants.CENHDR + _fnameLen + _extraLen + _comLen;
|
||
|
},
|
||
|
|
||
|
get realDataOffset() {
|
||
|
return _offset + Constants.LOCHDR + _dataHeader.fnameLen + _dataHeader.extraLen;
|
||
|
},
|
||
|
|
||
|
get dataHeader() {
|
||
|
return _dataHeader;
|
||
|
},
|
||
|
|
||
|
loadDataHeaderFromBinary: function (/*Buffer*/ input) {
|
||
|
var data = input.slice(_offset, _offset + Constants.LOCHDR);
|
||
|
// 30 bytes and should start with "PK\003\004"
|
||
|
if (data.readUInt32LE(0) !== Constants.LOCSIG) {
|
||
|
throw new Error(Utils.Errors.INVALID_LOC);
|
||
|
}
|
||
|
_dataHeader = {
|
||
|
// version needed to extract
|
||
|
version: data.readUInt16LE(Constants.LOCVER),
|
||
|
// general purpose bit flag
|
||
|
flags: data.readUInt16LE(Constants.LOCFLG),
|
||
|
// compression method
|
||
|
method: data.readUInt16LE(Constants.LOCHOW),
|
||
|
// modification time (2 bytes time, 2 bytes date)
|
||
|
time: data.readUInt32LE(Constants.LOCTIM),
|
||
|
// uncompressed file crc-32 value
|
||
|
crc: data.readUInt32LE(Constants.LOCCRC),
|
||
|
// compressed size
|
||
|
compressedSize: data.readUInt32LE(Constants.LOCSIZ),
|
||
|
// uncompressed size
|
||
|
size: data.readUInt32LE(Constants.LOCLEN),
|
||
|
// filename length
|
||
|
fnameLen: data.readUInt16LE(Constants.LOCNAM),
|
||
|
// extra field length
|
||
|
extraLen: data.readUInt16LE(Constants.LOCEXT)
|
||
|
};
|
||
|
},
|
||
|
|
||
|
loadFromBinary: function (/*Buffer*/ data) {
|
||
|
// data should be 46 bytes and start with "PK 01 02"
|
||
|
if (data.length !== Constants.CENHDR || data.readUInt32LE(0) !== Constants.CENSIG) {
|
||
|
throw new Error(Utils.Errors.INVALID_CEN);
|
||
|
}
|
||
|
// version made by
|
||
|
_verMade = data.readUInt16LE(Constants.CENVEM);
|
||
|
// version needed to extract
|
||
|
_version = data.readUInt16LE(Constants.CENVER);
|
||
|
// encrypt, decrypt flags
|
||
|
_flags = data.readUInt16LE(Constants.CENFLG);
|
||
|
// compression method
|
||
|
_method = data.readUInt16LE(Constants.CENHOW);
|
||
|
// modification time (2 bytes time, 2 bytes date)
|
||
|
_time = data.readUInt32LE(Constants.CENTIM);
|
||
|
// uncompressed file crc-32 value
|
||
|
_crc = data.readUInt32LE(Constants.CENCRC);
|
||
|
// compressed size
|
||
|
_compressedSize = data.readUInt32LE(Constants.CENSIZ);
|
||
|
// uncompressed size
|
||
|
_size = data.readUInt32LE(Constants.CENLEN);
|
||
|
// filename length
|
||
|
_fnameLen = data.readUInt16LE(Constants.CENNAM);
|
||
|
// extra field length
|
||
|
_extraLen = data.readUInt16LE(Constants.CENEXT);
|
||
|
// file comment length
|
||
|
_comLen = data.readUInt16LE(Constants.CENCOM);
|
||
|
// volume number start
|
||
|
_diskStart = data.readUInt16LE(Constants.CENDSK);
|
||
|
// internal file attributes
|
||
|
_inattr = data.readUInt16LE(Constants.CENATT);
|
||
|
// external file attributes
|
||
|
_attr = data.readUInt32LE(Constants.CENATX);
|
||
|
// LOC header offset
|
||
|
_offset = data.readUInt32LE(Constants.CENOFF);
|
||
|
},
|
||
|
|
||
|
dataHeaderToBinary: function () {
|
||
|
// LOC header size (30 bytes)
|
||
|
var data = Buffer.alloc(Constants.LOCHDR);
|
||
|
// "PK\003\004"
|
||
|
data.writeUInt32LE(Constants.LOCSIG, 0);
|
||
|
// version needed to extract
|
||
|
data.writeUInt16LE(_version, Constants.LOCVER);
|
||
|
// general purpose bit flag
|
||
|
data.writeUInt16LE(_flags, Constants.LOCFLG);
|
||
|
// compression method
|
||
|
data.writeUInt16LE(_method, Constants.LOCHOW);
|
||
|
// modification time (2 bytes time, 2 bytes date)
|
||
|
data.writeUInt32LE(_time, Constants.LOCTIM);
|
||
|
// uncompressed file crc-32 value
|
||
|
data.writeUInt32LE(_crc, Constants.LOCCRC);
|
||
|
// compressed size
|
||
|
data.writeUInt32LE(_compressedSize, Constants.LOCSIZ);
|
||
|
// uncompressed size
|
||
|
data.writeUInt32LE(_size, Constants.LOCLEN);
|
||
|
// filename length
|
||
|
data.writeUInt16LE(_fnameLen, Constants.LOCNAM);
|
||
|
// extra field length
|
||
|
data.writeUInt16LE(_extraLen, Constants.LOCEXT);
|
||
|
return data;
|
||
|
},
|
||
|
|
||
|
entryHeaderToBinary: function () {
|
||
|
// CEN header size (46 bytes)
|
||
|
var data = Buffer.alloc(Constants.CENHDR + _fnameLen + _extraLen + _comLen);
|
||
|
// "PK\001\002"
|
||
|
data.writeUInt32LE(Constants.CENSIG, 0);
|
||
|
// version made by
|
||
|
data.writeUInt16LE(_verMade, Constants.CENVEM);
|
||
|
// version needed to extract
|
||
|
data.writeUInt16LE(_version, Constants.CENVER);
|
||
|
// encrypt, decrypt flags
|
||
|
data.writeUInt16LE(_flags, Constants.CENFLG);
|
||
|
// compression method
|
||
|
data.writeUInt16LE(_method, Constants.CENHOW);
|
||
|
// modification time (2 bytes time, 2 bytes date)
|
||
|
data.writeUInt32LE(_time, Constants.CENTIM);
|
||
|
// uncompressed file crc-32 value
|
||
|
data.writeUInt32LE(_crc, Constants.CENCRC);
|
||
|
// compressed size
|
||
|
data.writeUInt32LE(_compressedSize, Constants.CENSIZ);
|
||
|
// uncompressed size
|
||
|
data.writeUInt32LE(_size, Constants.CENLEN);
|
||
|
// filename length
|
||
|
data.writeUInt16LE(_fnameLen, Constants.CENNAM);
|
||
|
// extra field length
|
||
|
data.writeUInt16LE(_extraLen, Constants.CENEXT);
|
||
|
// file comment length
|
||
|
data.writeUInt16LE(_comLen, Constants.CENCOM);
|
||
|
// volume number start
|
||
|
data.writeUInt16LE(_diskStart, Constants.CENDSK);
|
||
|
// internal file attributes
|
||
|
data.writeUInt16LE(_inattr, Constants.CENATT);
|
||
|
// external file attributes
|
||
|
data.writeUInt32LE(_attr, Constants.CENATX);
|
||
|
// LOC header offset
|
||
|
data.writeUInt32LE(_offset, Constants.CENOFF);
|
||
|
// fill all with
|
||
|
data.fill(0x00, Constants.CENHDR);
|
||
|
return data;
|
||
|
},
|
||
|
|
||
|
toJSON: function () {
|
||
|
const bytes = function (nr) {
|
||
|
return nr + " bytes";
|
||
|
};
|
||
|
|
||
|
return {
|
||
|
made: _verMade,
|
||
|
version: _version,
|
||
|
flags: _flags,
|
||
|
method: Utils.methodToString(_method),
|
||
|
time: this.time,
|
||
|
crc: "0x" + _crc.toString(16).toUpperCase(),
|
||
|
compressedSize: bytes(_compressedSize),
|
||
|
size: bytes(_size),
|
||
|
fileNameLength: bytes(_fnameLen),
|
||
|
extraLength: bytes(_extraLen),
|
||
|
commentLength: bytes(_comLen),
|
||
|
diskNumStart: _diskStart,
|
||
|
inAttr: _inattr,
|
||
|
attr: _attr,
|
||
|
offset: _offset,
|
||
|
entryHeaderSize: bytes(Constants.CENHDR + _fnameLen + _extraLen + _comLen)
|
||
|
};
|
||
|
},
|
||
|
|
||
|
toString: function () {
|
||
|
return JSON.stringify(this.toJSON(), null, "\t");
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 53854:
|
||
|
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
|
||
|
|
||
|
exports.EntryHeader = __webpack_require__(42907);
|
||
|
exports.MainHeader = __webpack_require__(83519);
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 83519:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
var Utils = __webpack_require__(85173),
|
||
|
Constants = Utils.Constants;
|
||
|
|
||
|
/* The entries in the end of central directory */
|
||
|
module.exports = function () {
|
||
|
var _volumeEntries = 0,
|
||
|
_totalEntries = 0,
|
||
|
_size = 0,
|
||
|
_offset = 0,
|
||
|
_commentLength = 0;
|
||
|
|
||
|
return {
|
||
|
get diskEntries() {
|
||
|
return _volumeEntries;
|
||
|
},
|
||
|
set diskEntries(/*Number*/ val) {
|
||
|
_volumeEntries = _totalEntries = val;
|
||
|
},
|
||
|
|
||
|
get totalEntries() {
|
||
|
return _totalEntries;
|
||
|
},
|
||
|
set totalEntries(/*Number*/ val) {
|
||
|
_totalEntries = _volumeEntries = val;
|
||
|
},
|
||
|
|
||
|
get size() {
|
||
|
return _size;
|
||
|
},
|
||
|
set size(/*Number*/ val) {
|
||
|
_size = val;
|
||
|
},
|
||
|
|
||
|
get offset() {
|
||
|
return _offset;
|
||
|
},
|
||
|
set offset(/*Number*/ val) {
|
||
|
_offset = val;
|
||
|
},
|
||
|
|
||
|
get commentLength() {
|
||
|
return _commentLength;
|
||
|
},
|
||
|
set commentLength(/*Number*/ val) {
|
||
|
_commentLength = val;
|
||
|
},
|
||
|
|
||
|
get mainHeaderSize() {
|
||
|
return Constants.ENDHDR + _commentLength;
|
||
|
},
|
||
|
|
||
|
loadFromBinary: function (/*Buffer*/ data) {
|
||
|
// data should be 22 bytes and start with "PK 05 06"
|
||
|
// or be 56+ bytes and start with "PK 06 06" for Zip64
|
||
|
if (
|
||
|
(data.length !== Constants.ENDHDR || data.readUInt32LE(0) !== Constants.ENDSIG) &&
|
||
|
(data.length < Constants.ZIP64HDR || data.readUInt32LE(0) !== Constants.ZIP64SIG)
|
||
|
) {
|
||
|
throw new Error(Utils.Errors.INVALID_END);
|
||
|
}
|
||
|
|
||
|
if (data.readUInt32LE(0) === Constants.ENDSIG) {
|
||
|
// number of entries on this volume
|
||
|
_volumeEntries = data.readUInt16LE(Constants.ENDSUB);
|
||
|
// total number of entries
|
||
|
_totalEntries = data.readUInt16LE(Constants.ENDTOT);
|
||
|
// central directory size in bytes
|
||
|
_size = data.readUInt32LE(Constants.ENDSIZ);
|
||
|
// offset of first CEN header
|
||
|
_offset = data.readUInt32LE(Constants.ENDOFF);
|
||
|
// zip file comment length
|
||
|
_commentLength = data.readUInt16LE(Constants.ENDCOM);
|
||
|
} else {
|
||
|
// number of entries on this volume
|
||
|
_volumeEntries = Utils.readBigUInt64LE(data, Constants.ZIP64SUB);
|
||
|
// total number of entries
|
||
|
_totalEntries = Utils.readBigUInt64LE(data, Constants.ZIP64TOT);
|
||
|
// central directory size in bytes
|
||
|
_size = Utils.readBigUInt64LE(data, Constants.ZIP64SIZ);
|
||
|
// offset of first CEN header
|
||
|
_offset = Utils.readBigUInt64LE(data, Constants.ZIP64OFF);
|
||
|
|
||
|
_commentLength = 0;
|
||
|
}
|
||
|
},
|
||
|
|
||
|
toBinary: function () {
|
||
|
var b = Buffer.alloc(Constants.ENDHDR + _commentLength);
|
||
|
// "PK 05 06" signature
|
||
|
b.writeUInt32LE(Constants.ENDSIG, 0);
|
||
|
b.writeUInt32LE(0, 4);
|
||
|
// number of entries on this volume
|
||
|
b.writeUInt16LE(_volumeEntries, Constants.ENDSUB);
|
||
|
// total number of entries
|
||
|
b.writeUInt16LE(_totalEntries, Constants.ENDTOT);
|
||
|
// central directory size in bytes
|
||
|
b.writeUInt32LE(_size, Constants.ENDSIZ);
|
||
|
// offset of first CEN header
|
||
|
b.writeUInt32LE(_offset, Constants.ENDOFF);
|
||
|
// zip file comment length
|
||
|
b.writeUInt16LE(_commentLength, Constants.ENDCOM);
|
||
|
// fill comment memory with spaces so no garbage is left there
|
||
|
b.fill(" ", Constants.ENDHDR);
|
||
|
|
||
|
return b;
|
||
|
},
|
||
|
|
||
|
toJSON: function () {
|
||
|
// creates 0x0000 style output
|
||
|
const offset = function (nr, len) {
|
||
|
let offs = nr.toString(16).toUpperCase();
|
||
|
while (offs.length < len) offs = "0" + offs;
|
||
|
return "0x" + offs;
|
||
|
};
|
||
|
|
||
|
return {
|
||
|
diskEntries: _volumeEntries,
|
||
|
totalEntries: _totalEntries,
|
||
|
size: _size + " bytes",
|
||
|
offset: offset(_offset, 4),
|
||
|
commentLength: _commentLength
|
||
|
};
|
||
|
},
|
||
|
|
||
|
toString: function () {
|
||
|
return JSON.stringify(this.toJSON(), null, "\t");
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 10278:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
module.exports = function (/*Buffer*/ inbuf) {
|
||
|
var zlib = __webpack_require__(78761);
|
||
|
|
||
|
var opts = { chunkSize: (parseInt(inbuf.length / 1024) + 1) * 1024 };
|
||
|
|
||
|
return {
|
||
|
deflate: function () {
|
||
|
return zlib.deflateRawSync(inbuf, opts);
|
||
|
},
|
||
|
|
||
|
deflateAsync: function (/*Function*/ callback) {
|
||
|
var tmp = zlib.createDeflateRaw(opts),
|
||
|
parts = [],
|
||
|
total = 0;
|
||
|
tmp.on("data", function (data) {
|
||
|
parts.push(data);
|
||
|
total += data.length;
|
||
|
});
|
||
|
tmp.on("end", function () {
|
||
|
var buf = Buffer.alloc(total),
|
||
|
written = 0;
|
||
|
buf.fill(0);
|
||
|
for (var i = 0; i < parts.length; i++) {
|
||
|
var part = parts[i];
|
||
|
part.copy(buf, written);
|
||
|
written += part.length;
|
||
|
}
|
||
|
callback && callback(buf);
|
||
|
});
|
||
|
tmp.end(inbuf);
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 81004:
|
||
|
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
|
||
|
|
||
|
exports.Deflater = __webpack_require__(10278);
|
||
|
exports.Inflater = __webpack_require__(61269);
|
||
|
exports.ZipCrypto = __webpack_require__(94729);
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 61269:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
module.exports = function (/*Buffer*/ inbuf) {
|
||
|
var zlib = __webpack_require__(78761);
|
||
|
|
||
|
return {
|
||
|
inflate: function () {
|
||
|
return zlib.inflateRawSync(inbuf);
|
||
|
},
|
||
|
|
||
|
inflateAsync: function (/*Function*/ callback) {
|
||
|
var tmp = zlib.createInflateRaw(),
|
||
|
parts = [],
|
||
|
total = 0;
|
||
|
tmp.on("data", function (data) {
|
||
|
parts.push(data);
|
||
|
total += data.length;
|
||
|
});
|
||
|
tmp.on("end", function () {
|
||
|
var buf = Buffer.alloc(total),
|
||
|
written = 0;
|
||
|
buf.fill(0);
|
||
|
for (var i = 0; i < parts.length; i++) {
|
||
|
var part = parts[i];
|
||
|
part.copy(buf, written);
|
||
|
written += part.length;
|
||
|
}
|
||
|
callback && callback(buf);
|
||
|
});
|
||
|
tmp.end(inbuf);
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 94729:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
// node crypt, we use it for generate salt
|
||
|
// eslint-disable-next-line node/no-unsupported-features/node-builtins
|
||
|
const { randomFillSync } = __webpack_require__(76417);
|
||
|
|
||
|
// generate CRC32 lookup table
|
||
|
const crctable = new Uint32Array(256).map((t, crc) => {
|
||
|
for (let j = 0; j < 8; j++) {
|
||
|
if (0 !== (crc & 1)) {
|
||
|
crc = (crc >>> 1) ^ 0xedb88320;
|
||
|
} else {
|
||
|
crc >>>= 1;
|
||
|
}
|
||
|
}
|
||
|
return crc >>> 0;
|
||
|
});
|
||
|
|
||
|
// C-style uInt32 Multiply (discards higher bits, when JS multiply discards lower bits)
|
||
|
const uMul = (a, b) => Math.imul(a, b) >>> 0;
|
||
|
|
||
|
// crc32 byte single update (actually same function is part of utils.crc32 function :) )
|
||
|
const crc32update = (pCrc32, bval) => {
|
||
|
return crctable[(pCrc32 ^ bval) & 0xff] ^ (pCrc32 >>> 8);
|
||
|
};
|
||
|
|
||
|
// function for generating salt for encrytion header
|
||
|
const genSalt = () => {
|
||
|
if ("function" === typeof randomFillSync) {
|
||
|
return randomFillSync(Buffer.alloc(12));
|
||
|
} else {
|
||
|
// fallback if function is not defined
|
||
|
return genSalt.node();
|
||
|
}
|
||
|
};
|
||
|
|
||
|
// salt generation with node random function (mainly as fallback)
|
||
|
genSalt.node = () => {
|
||
|
const salt = Buffer.alloc(12);
|
||
|
const len = salt.length;
|
||
|
for (let i = 0; i < len; i++) salt[i] = (Math.random() * 256) & 0xff;
|
||
|
return salt;
|
||
|
};
|
||
|
|
||
|
// general config
|
||
|
const config = {
|
||
|
genSalt
|
||
|
};
|
||
|
|
||
|
// Class Initkeys handles same basic ops with keys
|
||
|
function Initkeys(pw) {
|
||
|
const pass = Buffer.isBuffer(pw) ? pw : Buffer.from(pw);
|
||
|
this.keys = new Uint32Array([0x12345678, 0x23456789, 0x34567890]);
|
||
|
for (let i = 0; i < pass.length; i++) {
|
||
|
this.updateKeys(pass[i]);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
Initkeys.prototype.updateKeys = function (byteValue) {
|
||
|
const keys = this.keys;
|
||
|
keys[0] = crc32update(keys[0], byteValue);
|
||
|
keys[1] += keys[0] & 0xff;
|
||
|
keys[1] = uMul(keys[1], 134775813) + 1;
|
||
|
keys[2] = crc32update(keys[2], keys[1] >>> 24);
|
||
|
return byteValue;
|
||
|
};
|
||
|
|
||
|
Initkeys.prototype.next = function () {
|
||
|
const k = (this.keys[2] | 2) >>> 0; // key
|
||
|
return (uMul(k, k ^ 1) >> 8) & 0xff; // decode
|
||
|
};
|
||
|
|
||
|
function make_decrypter(/*Buffer*/ pwd) {
|
||
|
// 1. Stage initialize key
|
||
|
const keys = new Initkeys(pwd);
|
||
|
|
||
|
// return decrypter function
|
||
|
return function (/*Buffer*/ data) {
|
||
|
// result - we create new Buffer for results
|
||
|
const result = Buffer.alloc(data.length);
|
||
|
let pos = 0;
|
||
|
// process input data
|
||
|
for (let c of data) {
|
||
|
//c ^= keys.next();
|
||
|
//result[pos++] = c; // decode & Save Value
|
||
|
result[pos++] = keys.updateKeys(c ^ keys.next()); // update keys with decoded byte
|
||
|
}
|
||
|
return result;
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function make_encrypter(/*Buffer*/ pwd) {
|
||
|
// 1. Stage initialize key
|
||
|
const keys = new Initkeys(pwd);
|
||
|
|
||
|
// return encrypting function, result and pos is here so we dont have to merge buffers later
|
||
|
return function (/*Buffer*/ data, /*Buffer*/ result, /* Number */ pos = 0) {
|
||
|
// result - we create new Buffer for results
|
||
|
if (!result) result = Buffer.alloc(data.length);
|
||
|
// process input data
|
||
|
for (let c of data) {
|
||
|
const k = keys.next(); // save key byte
|
||
|
result[pos++] = c ^ k; // save val
|
||
|
keys.updateKeys(c); // update keys with decoded byte
|
||
|
}
|
||
|
return result;
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function decrypt(/*Buffer*/ data, /*Object*/ header, /*String, Buffer*/ pwd) {
|
||
|
if (!data || !Buffer.isBuffer(data) || data.length < 12) {
|
||
|
return Buffer.alloc(0);
|
||
|
}
|
||
|
|
||
|
// 1. We Initialize and generate decrypting function
|
||
|
const decrypter = make_decrypter(pwd);
|
||
|
|
||
|
// 2. decrypt salt what is always 12 bytes and is a part of file content
|
||
|
const salt = decrypter(data.slice(0, 12));
|
||
|
|
||
|
// 3. does password meet expectations
|
||
|
if (salt[11] !== header.crc >>> 24) {
|
||
|
throw "ADM-ZIP: Wrong Password";
|
||
|
}
|
||
|
|
||
|
// 4. decode content
|
||
|
return decrypter(data.slice(12));
|
||
|
}
|
||
|
|
||
|
// lets add way to populate salt, NOT RECOMMENDED for production but maybe useful for testing general functionality
|
||
|
function _salter(data) {
|
||
|
if (Buffer.isBuffer(data) && data.length >= 12) {
|
||
|
// be aware - currently salting buffer data is modified
|
||
|
config.genSalt = function () {
|
||
|
return data.slice(0, 12);
|
||
|
};
|
||
|
} else if (data === "node") {
|
||
|
// test salt generation with node random function
|
||
|
config.genSalt = genSalt.node;
|
||
|
} else {
|
||
|
// if value is not acceptable config gets reset.
|
||
|
config.genSalt = genSalt;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function encrypt(/*Buffer*/ data, /*Object*/ header, /*String, Buffer*/ pwd, /*Boolean*/ oldlike = false) {
|
||
|
// 1. test data if data is not Buffer we make buffer from it
|
||
|
if (data == null) data = Buffer.alloc(0);
|
||
|
// if data is not buffer be make buffer from it
|
||
|
if (!Buffer.isBuffer(data)) data = Buffer.from(data.toString());
|
||
|
|
||
|
// 2. We Initialize and generate encrypting function
|
||
|
const encrypter = make_encrypter(pwd);
|
||
|
|
||
|
// 3. generate salt (12-bytes of random data)
|
||
|
const salt = config.genSalt();
|
||
|
salt[11] = (header.crc >>> 24) & 0xff;
|
||
|
|
||
|
// old implementations (before PKZip 2.04g) used two byte check
|
||
|
if (oldlike) salt[10] = (header.crc >>> 16) & 0xff;
|
||
|
|
||
|
// 4. create output
|
||
|
const result = Buffer.alloc(data.length + 12);
|
||
|
encrypter(salt, result);
|
||
|
|
||
|
// finally encode content
|
||
|
return encrypter(data, result, 12);
|
||
|
}
|
||
|
|
||
|
module.exports = { decrypt, encrypt, _salter };
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 55991:
|
||
|
/***/ ((module) => {
|
||
|
|
||
|
module.exports = {
|
||
|
/* The local file header */
|
||
|
LOCHDR : 30, // LOC header size
|
||
|
LOCSIG : 0x04034b50, // "PK\003\004"
|
||
|
LOCVER : 4, // version needed to extract
|
||
|
LOCFLG : 6, // general purpose bit flag
|
||
|
LOCHOW : 8, // compression method
|
||
|
LOCTIM : 10, // modification time (2 bytes time, 2 bytes date)
|
||
|
LOCCRC : 14, // uncompressed file crc-32 value
|
||
|
LOCSIZ : 18, // compressed size
|
||
|
LOCLEN : 22, // uncompressed size
|
||
|
LOCNAM : 26, // filename length
|
||
|
LOCEXT : 28, // extra field length
|
||
|
|
||
|
/* The Data descriptor */
|
||
|
EXTSIG : 0x08074b50, // "PK\007\008"
|
||
|
EXTHDR : 16, // EXT header size
|
||
|
EXTCRC : 4, // uncompressed file crc-32 value
|
||
|
EXTSIZ : 8, // compressed size
|
||
|
EXTLEN : 12, // uncompressed size
|
||
|
|
||
|
/* The central directory file header */
|
||
|
CENHDR : 46, // CEN header size
|
||
|
CENSIG : 0x02014b50, // "PK\001\002"
|
||
|
CENVEM : 4, // version made by
|
||
|
CENVER : 6, // version needed to extract
|
||
|
CENFLG : 8, // encrypt, decrypt flags
|
||
|
CENHOW : 10, // compression method
|
||
|
CENTIM : 12, // modification time (2 bytes time, 2 bytes date)
|
||
|
CENCRC : 16, // uncompressed file crc-32 value
|
||
|
CENSIZ : 20, // compressed size
|
||
|
CENLEN : 24, // uncompressed size
|
||
|
CENNAM : 28, // filename length
|
||
|
CENEXT : 30, // extra field length
|
||
|
CENCOM : 32, // file comment length
|
||
|
CENDSK : 34, // volume number start
|
||
|
CENATT : 36, // internal file attributes
|
||
|
CENATX : 38, // external file attributes (host system dependent)
|
||
|
CENOFF : 42, // LOC header offset
|
||
|
|
||
|
/* The entries in the end of central directory */
|
||
|
ENDHDR : 22, // END header size
|
||
|
ENDSIG : 0x06054b50, // "PK\005\006"
|
||
|
ENDSUB : 8, // number of entries on this disk
|
||
|
ENDTOT : 10, // total number of entries
|
||
|
ENDSIZ : 12, // central directory size in bytes
|
||
|
ENDOFF : 16, // offset of first CEN header
|
||
|
ENDCOM : 20, // zip file comment length
|
||
|
|
||
|
END64HDR : 20, // zip64 END header size
|
||
|
END64SIG : 0x07064b50, // zip64 Locator signature, "PK\006\007"
|
||
|
END64START : 4, // number of the disk with the start of the zip64
|
||
|
END64OFF : 8, // relative offset of the zip64 end of central directory
|
||
|
END64NUMDISKS : 16, // total number of disks
|
||
|
|
||
|
ZIP64SIG : 0x06064b50, // zip64 signature, "PK\006\006"
|
||
|
ZIP64HDR : 56, // zip64 record minimum size
|
||
|
ZIP64LEAD : 12, // leading bytes at the start of the record, not counted by the value stored in ZIP64SIZE
|
||
|
ZIP64SIZE : 4, // zip64 size of the central directory record
|
||
|
ZIP64VEM : 12, // zip64 version made by
|
||
|
ZIP64VER : 14, // zip64 version needed to extract
|
||
|
ZIP64DSK : 16, // zip64 number of this disk
|
||
|
ZIP64DSKDIR : 20, // number of the disk with the start of the record directory
|
||
|
ZIP64SUB : 24, // number of entries on this disk
|
||
|
ZIP64TOT : 32, // total number of entries
|
||
|
ZIP64SIZB : 40, // zip64 central directory size in bytes
|
||
|
ZIP64OFF : 48, // offset of start of central directory with respect to the starting disk number
|
||
|
ZIP64EXTRA : 56, // extensible data sector
|
||
|
|
||
|
/* Compression methods */
|
||
|
STORED : 0, // no compression
|
||
|
SHRUNK : 1, // shrunk
|
||
|
REDUCED1 : 2, // reduced with compression factor 1
|
||
|
REDUCED2 : 3, // reduced with compression factor 2
|
||
|
REDUCED3 : 4, // reduced with compression factor 3
|
||
|
REDUCED4 : 5, // reduced with compression factor 4
|
||
|
IMPLODED : 6, // imploded
|
||
|
// 7 reserved for Tokenizing compression algorithm
|
||
|
DEFLATED : 8, // deflated
|
||
|
ENHANCED_DEFLATED: 9, // enhanced deflated
|
||
|
PKWARE : 10,// PKWare DCL imploded
|
||
|
// 11 reserved by PKWARE
|
||
|
BZIP2 : 12, // compressed using BZIP2
|
||
|
// 13 reserved by PKWARE
|
||
|
LZMA : 14, // LZMA
|
||
|
// 15-17 reserved by PKWARE
|
||
|
IBM_TERSE : 18, // compressed using IBM TERSE
|
||
|
IBM_LZ77 : 19, // IBM LZ77 z
|
||
|
AES_ENCRYPT : 99, // WinZIP AES encryption method
|
||
|
|
||
|
/* General purpose bit flag */
|
||
|
// values can obtained with expression 2**bitnr
|
||
|
FLG_ENC : 1, // Bit 0: encrypted file
|
||
|
FLG_COMP1 : 2, // Bit 1, compression option
|
||
|
FLG_COMP2 : 4, // Bit 2, compression option
|
||
|
FLG_DESC : 8, // Bit 3, data descriptor
|
||
|
FLG_ENH : 16, // Bit 4, enhanced deflating
|
||
|
FLG_PATCH : 32, // Bit 5, indicates that the file is compressed patched data.
|
||
|
FLG_STR : 64, // Bit 6, strong encryption (patented)
|
||
|
// Bits 7-10: Currently unused.
|
||
|
FLG_EFS : 2048, // Bit 11: Language encoding flag (EFS)
|
||
|
// Bit 12: Reserved by PKWARE for enhanced compression.
|
||
|
// Bit 13: encrypted the Central Directory (patented).
|
||
|
// Bits 14-15: Reserved by PKWARE.
|
||
|
FLG_MSK : 4096, // mask header values
|
||
|
|
||
|
/* Load type */
|
||
|
FILE : 2,
|
||
|
BUFFER : 1,
|
||
|
NONE : 0,
|
||
|
|
||
|
/* 4.5 Extensible data fields */
|
||
|
EF_ID : 0,
|
||
|
EF_SIZE : 2,
|
||
|
|
||
|
/* Header IDs */
|
||
|
ID_ZIP64 : 0x0001,
|
||
|
ID_AVINFO : 0x0007,
|
||
|
ID_PFS : 0x0008,
|
||
|
ID_OS2 : 0x0009,
|
||
|
ID_NTFS : 0x000a,
|
||
|
ID_OPENVMS : 0x000c,
|
||
|
ID_UNIX : 0x000d,
|
||
|
ID_FORK : 0x000e,
|
||
|
ID_PATCH : 0x000f,
|
||
|
ID_X509_PKCS7 : 0x0014,
|
||
|
ID_X509_CERTID_F : 0x0015,
|
||
|
ID_X509_CERTID_C : 0x0016,
|
||
|
ID_STRONGENC : 0x0017,
|
||
|
ID_RECORD_MGT : 0x0018,
|
||
|
ID_X509_PKCS7_RL : 0x0019,
|
||
|
ID_IBM1 : 0x0065,
|
||
|
ID_IBM2 : 0x0066,
|
||
|
ID_POSZIP : 0x4690,
|
||
|
|
||
|
EF_ZIP64_OR_32 : 0xffffffff,
|
||
|
EF_ZIP64_OR_16 : 0xffff,
|
||
|
EF_ZIP64_SUNCOMP : 0,
|
||
|
EF_ZIP64_SCOMP : 8,
|
||
|
EF_ZIP64_RHO : 16,
|
||
|
EF_ZIP64_DSN : 24
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 12190:
|
||
|
/***/ ((module) => {
|
||
|
|
||
|
module.exports = {
|
||
|
/* Header error messages */
|
||
|
INVALID_LOC: "Invalid LOC header (bad signature)",
|
||
|
INVALID_CEN: "Invalid CEN header (bad signature)",
|
||
|
INVALID_END: "Invalid END header (bad signature)",
|
||
|
|
||
|
/* ZipEntry error messages*/
|
||
|
NO_DATA: "Nothing to decompress",
|
||
|
BAD_CRC: "CRC32 checksum failed",
|
||
|
FILE_IN_THE_WAY: "There is a file in the way: %s",
|
||
|
UNKNOWN_METHOD: "Invalid/unsupported compression method",
|
||
|
|
||
|
/* Inflater error messages */
|
||
|
AVAIL_DATA: "inflate::Available inflate data did not terminate",
|
||
|
INVALID_DISTANCE: "inflate::Invalid literal/length or distance code in fixed or dynamic block",
|
||
|
TO_MANY_CODES: "inflate::Dynamic block code description: too many length or distance codes",
|
||
|
INVALID_REPEAT_LEN: "inflate::Dynamic block code description: repeat more than specified lengths",
|
||
|
INVALID_REPEAT_FIRST: "inflate::Dynamic block code description: repeat lengths with no first length",
|
||
|
INCOMPLETE_CODES: "inflate::Dynamic block code description: code lengths codes incomplete",
|
||
|
INVALID_DYN_DISTANCE: "inflate::Dynamic block code description: invalid distance code lengths",
|
||
|
INVALID_CODES_LEN: "inflate::Dynamic block code description: invalid literal/length code lengths",
|
||
|
INVALID_STORE_BLOCK: "inflate::Stored block length did not match one's complement",
|
||
|
INVALID_BLOCK_TYPE: "inflate::Invalid block type (type == 3)",
|
||
|
|
||
|
/* ADM-ZIP error messages */
|
||
|
CANT_EXTRACT_FILE: "Could not extract the file",
|
||
|
CANT_OVERRIDE: "Target file already exists",
|
||
|
NO_ZIP: "No zip file was loaded",
|
||
|
NO_ENTRY: "Entry doesn't exist",
|
||
|
DIRECTORY_CONTENT_ERROR: "A directory cannot have content",
|
||
|
FILE_NOT_FOUND: "File not found: %s",
|
||
|
NOT_IMPLEMENTED: "Not implemented",
|
||
|
INVALID_FILENAME: "Invalid filename",
|
||
|
INVALID_FORMAT: "Invalid or unsupported zip format. No END header found"
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 13455:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
const fs = __webpack_require__(65147).require();
|
||
|
const pth = __webpack_require__(85622);
|
||
|
|
||
|
fs.existsSync = fs.existsSync || pth.existsSync;
|
||
|
|
||
|
module.exports = function (/*String*/ path) {
|
||
|
var _path = path || "",
|
||
|
_obj = newAttr(),
|
||
|
_stat = null;
|
||
|
|
||
|
function newAttr() {
|
||
|
return {
|
||
|
directory: false,
|
||
|
readonly: false,
|
||
|
hidden: false,
|
||
|
executable: false,
|
||
|
mtime: 0,
|
||
|
atime: 0
|
||
|
};
|
||
|
}
|
||
|
|
||
|
if (_path && fs.existsSync(_path)) {
|
||
|
_stat = fs.statSync(_path);
|
||
|
_obj.directory = _stat.isDirectory();
|
||
|
_obj.mtime = _stat.mtime;
|
||
|
_obj.atime = _stat.atime;
|
||
|
_obj.executable = (0o111 & _stat.mode) !== 0; // file is executable who ever har right not just owner
|
||
|
_obj.readonly = (0o200 & _stat.mode) === 0; // readonly if owner has no write right
|
||
|
_obj.hidden = pth.basename(_path)[0] === ".";
|
||
|
} else {
|
||
|
console.warn("Invalid path: " + _path);
|
||
|
}
|
||
|
|
||
|
return {
|
||
|
get directory() {
|
||
|
return _obj.directory;
|
||
|
},
|
||
|
|
||
|
get readOnly() {
|
||
|
return _obj.readonly;
|
||
|
},
|
||
|
|
||
|
get hidden() {
|
||
|
return _obj.hidden;
|
||
|
},
|
||
|
|
||
|
get mtime() {
|
||
|
return _obj.mtime;
|
||
|
},
|
||
|
|
||
|
get atime() {
|
||
|
return _obj.atime;
|
||
|
},
|
||
|
|
||
|
get executable() {
|
||
|
return _obj.executable;
|
||
|
},
|
||
|
|
||
|
decodeAttributes: function () {},
|
||
|
|
||
|
encodeAttributes: function () {},
|
||
|
|
||
|
toJSON: function () {
|
||
|
return {
|
||
|
path: _path,
|
||
|
isDirectory: _obj.directory,
|
||
|
isReadOnly: _obj.readonly,
|
||
|
isHidden: _obj.hidden,
|
||
|
isExecutable: _obj.executable,
|
||
|
mTime: _obj.mtime,
|
||
|
aTime: _obj.atime
|
||
|
};
|
||
|
},
|
||
|
|
||
|
toString: function () {
|
||
|
return JSON.stringify(this.toJSON(), null, "\t");
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 65147:
|
||
|
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
|
||
|
|
||
|
exports.require = function () {
|
||
|
if (typeof process === "object" && process.versions && process.versions["electron"]) {
|
||
|
try {
|
||
|
const originalFs = __webpack_require__(Object(function webpackMissingModule() { var e = new Error("Cannot find module 'original-fs'"); e.code = 'MODULE_NOT_FOUND'; throw e; }()));
|
||
|
if (Object.keys(originalFs).length > 0) {
|
||
|
return originalFs;
|
||
|
}
|
||
|
} catch (e) {}
|
||
|
}
|
||
|
return __webpack_require__(35747);
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 85173:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
module.exports = __webpack_require__(7646);
|
||
|
module.exports.Constants = __webpack_require__(55991);
|
||
|
module.exports.Errors = __webpack_require__(12190);
|
||
|
module.exports.FileAttr = __webpack_require__(13455);
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 7646:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
const fsystem = __webpack_require__(65147).require();
|
||
|
const pth = __webpack_require__(85622);
|
||
|
const Constants = __webpack_require__(55991);
|
||
|
const isWin = typeof process === "object" && "win32" === process.platform;
|
||
|
|
||
|
const is_Obj = (obj) => obj && typeof obj === "object";
|
||
|
|
||
|
// generate CRC32 lookup table
|
||
|
const crcTable = new Uint32Array(256).map((t, c) => {
|
||
|
for (let k = 0; k < 8; k++) {
|
||
|
if ((c & 1) !== 0) {
|
||
|
c = 0xedb88320 ^ (c >>> 1);
|
||
|
} else {
|
||
|
c >>>= 1;
|
||
|
}
|
||
|
}
|
||
|
return c >>> 0;
|
||
|
});
|
||
|
|
||
|
// UTILS functions
|
||
|
|
||
|
function Utils(opts) {
|
||
|
this.sep = pth.sep;
|
||
|
this.fs = fsystem;
|
||
|
|
||
|
if (is_Obj(opts)) {
|
||
|
// custom filesystem
|
||
|
if (is_Obj(opts.fs) && typeof opts.fs.statSync === "function") {
|
||
|
this.fs = opts.fs;
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
module.exports = Utils;
|
||
|
|
||
|
// INSTANCED functions
|
||
|
|
||
|
Utils.prototype.makeDir = function (/*String*/ folder) {
|
||
|
const self = this;
|
||
|
|
||
|
// Sync - make directories tree
|
||
|
function mkdirSync(/*String*/ fpath) {
|
||
|
let resolvedPath = fpath.split(self.sep)[0];
|
||
|
fpath.split(self.sep).forEach(function (name) {
|
||
|
if (!name || name.substr(-1, 1) === ":") return;
|
||
|
resolvedPath += self.sep + name;
|
||
|
var stat;
|
||
|
try {
|
||
|
stat = self.fs.statSync(resolvedPath);
|
||
|
} catch (e) {
|
||
|
self.fs.mkdirSync(resolvedPath);
|
||
|
}
|
||
|
if (stat && stat.isFile()) throw Errors.FILE_IN_THE_WAY.replace("%s", resolvedPath);
|
||
|
});
|
||
|
}
|
||
|
|
||
|
mkdirSync(folder);
|
||
|
};
|
||
|
|
||
|
Utils.prototype.writeFileTo = function (/*String*/ path, /*Buffer*/ content, /*Boolean*/ overwrite, /*Number*/ attr) {
|
||
|
const self = this;
|
||
|
if (self.fs.existsSync(path)) {
|
||
|
if (!overwrite) return false; // cannot overwrite
|
||
|
|
||
|
var stat = self.fs.statSync(path);
|
||
|
if (stat.isDirectory()) {
|
||
|
return false;
|
||
|
}
|
||
|
}
|
||
|
var folder = pth.dirname(path);
|
||
|
if (!self.fs.existsSync(folder)) {
|
||
|
self.makeDir(folder);
|
||
|
}
|
||
|
|
||
|
var fd;
|
||
|
try {
|
||
|
fd = self.fs.openSync(path, "w", 438); // 0666
|
||
|
} catch (e) {
|
||
|
self.fs.chmodSync(path, 438);
|
||
|
fd = self.fs.openSync(path, "w", 438);
|
||
|
}
|
||
|
if (fd) {
|
||
|
try {
|
||
|
self.fs.writeSync(fd, content, 0, content.length, 0);
|
||
|
} finally {
|
||
|
self.fs.closeSync(fd);
|
||
|
}
|
||
|
}
|
||
|
self.fs.chmodSync(path, attr || 438);
|
||
|
return true;
|
||
|
};
|
||
|
|
||
|
Utils.prototype.writeFileToAsync = function (/*String*/ path, /*Buffer*/ content, /*Boolean*/ overwrite, /*Number*/ attr, /*Function*/ callback) {
|
||
|
if (typeof attr === "function") {
|
||
|
callback = attr;
|
||
|
attr = undefined;
|
||
|
}
|
||
|
|
||
|
const self = this;
|
||
|
|
||
|
self.fs.exists(path, function (exist) {
|
||
|
if (exist && !overwrite) return callback(false);
|
||
|
|
||
|
self.fs.stat(path, function (err, stat) {
|
||
|
if (exist && stat.isDirectory()) {
|
||
|
return callback(false);
|
||
|
}
|
||
|
|
||
|
var folder = pth.dirname(path);
|
||
|
self.fs.exists(folder, function (exists) {
|
||
|
if (!exists) self.makeDir(folder);
|
||
|
|
||
|
self.fs.open(path, "w", 438, function (err, fd) {
|
||
|
if (err) {
|
||
|
self.fs.chmod(path, 438, function () {
|
||
|
self.fs.open(path, "w", 438, function (err, fd) {
|
||
|
self.fs.write(fd, content, 0, content.length, 0, function () {
|
||
|
self.fs.close(fd, function () {
|
||
|
self.fs.chmod(path, attr || 438, function () {
|
||
|
callback(true);
|
||
|
});
|
||
|
});
|
||
|
});
|
||
|
});
|
||
|
});
|
||
|
} else if (fd) {
|
||
|
self.fs.write(fd, content, 0, content.length, 0, function () {
|
||
|
self.fs.close(fd, function () {
|
||
|
self.fs.chmod(path, attr || 438, function () {
|
||
|
callback(true);
|
||
|
});
|
||
|
});
|
||
|
});
|
||
|
} else {
|
||
|
self.fs.chmod(path, attr || 438, function () {
|
||
|
callback(true);
|
||
|
});
|
||
|
}
|
||
|
});
|
||
|
});
|
||
|
});
|
||
|
});
|
||
|
};
|
||
|
|
||
|
Utils.prototype.findFiles = function (/*String*/ path) {
|
||
|
const self = this;
|
||
|
|
||
|
function findSync(/*String*/ dir, /*RegExp*/ pattern, /*Boolean*/ recursive) {
|
||
|
if (typeof pattern === "boolean") {
|
||
|
recursive = pattern;
|
||
|
pattern = undefined;
|
||
|
}
|
||
|
let files = [];
|
||
|
self.fs.readdirSync(dir).forEach(function (file) {
|
||
|
var path = pth.join(dir, file);
|
||
|
|
||
|
if (self.fs.statSync(path).isDirectory() && recursive) files = files.concat(findSync(path, pattern, recursive));
|
||
|
|
||
|
if (!pattern || pattern.test(path)) {
|
||
|
files.push(pth.normalize(path) + (self.fs.statSync(path).isDirectory() ? self.sep : ""));
|
||
|
}
|
||
|
});
|
||
|
return files;
|
||
|
}
|
||
|
|
||
|
return findSync(path, undefined, true);
|
||
|
};
|
||
|
|
||
|
Utils.prototype.getAttributes = function () {};
|
||
|
|
||
|
Utils.prototype.setAttributes = function () {};
|
||
|
|
||
|
// STATIC functions
|
||
|
|
||
|
// crc32 single update (it is part of crc32)
|
||
|
Utils.crc32update = function (crc, byte) {
|
||
|
return crcTable[(crc ^ byte) & 0xff] ^ (crc >>> 8);
|
||
|
};
|
||
|
|
||
|
Utils.crc32 = function (buf) {
|
||
|
if (typeof buf === "string") {
|
||
|
buf = Buffer.from(buf, "utf8");
|
||
|
}
|
||
|
// Generate crcTable
|
||
|
if (!crcTable.length) genCRCTable();
|
||
|
|
||
|
let len = buf.length;
|
||
|
let crc = ~0;
|
||
|
for (let off = 0; off < len; ) crc = Utils.crc32update(crc, buf[off++]);
|
||
|
// xor and cast as uint32 number
|
||
|
return ~crc >>> 0;
|
||
|
};
|
||
|
|
||
|
Utils.methodToString = function (/*Number*/ method) {
|
||
|
switch (method) {
|
||
|
case Constants.STORED:
|
||
|
return "STORED (" + method + ")";
|
||
|
case Constants.DEFLATED:
|
||
|
return "DEFLATED (" + method + ")";
|
||
|
default:
|
||
|
return "UNSUPPORTED (" + method + ")";
|
||
|
}
|
||
|
};
|
||
|
|
||
|
// removes ".." style path elements
|
||
|
Utils.canonical = function (/*string*/ path) {
|
||
|
if (!path) return "";
|
||
|
// trick normalize think path is absolute
|
||
|
var safeSuffix = pth.posix.normalize("/" + path.split("\\").join("/"));
|
||
|
return pth.join(".", safeSuffix);
|
||
|
};
|
||
|
|
||
|
// make abolute paths taking prefix as root folder
|
||
|
Utils.sanitize = function (/*string*/ prefix, /*string*/ name) {
|
||
|
prefix = pth.resolve(pth.normalize(prefix));
|
||
|
var parts = name.split("/");
|
||
|
for (var i = 0, l = parts.length; i < l; i++) {
|
||
|
var path = pth.normalize(pth.join(prefix, parts.slice(i, l).join(pth.sep)));
|
||
|
if (path.indexOf(prefix) === 0) {
|
||
|
return path;
|
||
|
}
|
||
|
}
|
||
|
return pth.normalize(pth.join(prefix, pth.basename(name)));
|
||
|
};
|
||
|
|
||
|
// converts buffer, Uint8Array, string types to buffer
|
||
|
Utils.toBuffer = function toBuffer(/*buffer, Uint8Array, string*/ input) {
|
||
|
if (Buffer.isBuffer(input)) {
|
||
|
return input;
|
||
|
} else if (input instanceof Uint8Array) {
|
||
|
return Buffer.from(input);
|
||
|
} else {
|
||
|
// expect string all other values are invalid and return empty buffer
|
||
|
return typeof input === "string" ? Buffer.from(input, "utf8") : Buffer.alloc(0);
|
||
|
}
|
||
|
};
|
||
|
|
||
|
Utils.readBigUInt64LE = function (/*Buffer*/ buffer, /*int*/ index) {
|
||
|
var slice = Buffer.from(buffer.slice(index, index + 8));
|
||
|
slice.swap64();
|
||
|
|
||
|
return parseInt(`0x${slice.toString("hex")}`);
|
||
|
};
|
||
|
|
||
|
Utils.isWin = isWin; // Do we have windows system
|
||
|
Utils.crcTable = crcTable;
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 47396:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
var Utils = __webpack_require__(85173),
|
||
|
Headers = __webpack_require__(53854),
|
||
|
Constants = Utils.Constants,
|
||
|
Methods = __webpack_require__(81004);
|
||
|
|
||
|
module.exports = function (/*Buffer*/ input) {
|
||
|
var _entryHeader = new Headers.EntryHeader(),
|
||
|
_entryName = Buffer.alloc(0),
|
||
|
_comment = Buffer.alloc(0),
|
||
|
_isDirectory = false,
|
||
|
uncompressedData = null,
|
||
|
_extra = Buffer.alloc(0);
|
||
|
|
||
|
function getCompressedDataFromZip() {
|
||
|
if (!input || !Buffer.isBuffer(input)) {
|
||
|
return Buffer.alloc(0);
|
||
|
}
|
||
|
_entryHeader.loadDataHeaderFromBinary(input);
|
||
|
return input.slice(_entryHeader.realDataOffset, _entryHeader.realDataOffset + _entryHeader.compressedSize);
|
||
|
}
|
||
|
|
||
|
function crc32OK(data) {
|
||
|
// if bit 3 (0x08) of the general-purpose flags field is set, then the CRC-32 and file sizes are not known when the header is written
|
||
|
if ((_entryHeader.flags & 0x8) !== 0x8) {
|
||
|
if (Utils.crc32(data) !== _entryHeader.dataHeader.crc) {
|
||
|
return false;
|
||
|
}
|
||
|
} else {
|
||
|
// @TODO: load and check data descriptor header
|
||
|
// The fields in the local header are filled with zero, and the CRC-32 and size are appended in a 12-byte structure
|
||
|
// (optionally preceded by a 4-byte signature) immediately after the compressed data:
|
||
|
}
|
||
|
return true;
|
||
|
}
|
||
|
|
||
|
function decompress(/*Boolean*/ async, /*Function*/ callback, /*String, Buffer*/ pass) {
|
||
|
if (typeof callback === "undefined" && typeof async === "string") {
|
||
|
pass = async;
|
||
|
async = void 0;
|
||
|
}
|
||
|
if (_isDirectory) {
|
||
|
if (async && callback) {
|
||
|
callback(Buffer.alloc(0), Utils.Errors.DIRECTORY_CONTENT_ERROR); //si added error.
|
||
|
}
|
||
|
return Buffer.alloc(0);
|
||
|
}
|
||
|
|
||
|
var compressedData = getCompressedDataFromZip();
|
||
|
|
||
|
if (compressedData.length === 0) {
|
||
|
// File is empty, nothing to decompress.
|
||
|
if (async && callback) callback(compressedData);
|
||
|
return compressedData;
|
||
|
}
|
||
|
|
||
|
if (_entryHeader.encripted) {
|
||
|
if ("string" !== typeof pass && !Buffer.isBuffer(pass)) {
|
||
|
throw new Error("ADM-ZIP: Incompatible password parameter");
|
||
|
}
|
||
|
compressedData = Methods.ZipCrypto.decrypt(compressedData, _entryHeader, pass);
|
||
|
}
|
||
|
|
||
|
var data = Buffer.alloc(_entryHeader.size);
|
||
|
|
||
|
switch (_entryHeader.method) {
|
||
|
case Utils.Constants.STORED:
|
||
|
compressedData.copy(data);
|
||
|
if (!crc32OK(data)) {
|
||
|
if (async && callback) callback(data, Utils.Errors.BAD_CRC); //si added error
|
||
|
throw new Error(Utils.Errors.BAD_CRC);
|
||
|
} else {
|
||
|
//si added otherwise did not seem to return data.
|
||
|
if (async && callback) callback(data);
|
||
|
return data;
|
||
|
}
|
||
|
case Utils.Constants.DEFLATED:
|
||
|
var inflater = new Methods.Inflater(compressedData);
|
||
|
if (!async) {
|
||
|
const result = inflater.inflate(data);
|
||
|
result.copy(data, 0);
|
||
|
if (!crc32OK(data)) {
|
||
|
throw new Error(Utils.Errors.BAD_CRC + " " + _entryName.toString());
|
||
|
}
|
||
|
return data;
|
||
|
} else {
|
||
|
inflater.inflateAsync(function (result) {
|
||
|
result.copy(result, 0);
|
||
|
if (callback) {
|
||
|
if (!crc32OK(result)) {
|
||
|
callback(result, Utils.Errors.BAD_CRC); //si added error
|
||
|
} else {
|
||
|
callback(result);
|
||
|
}
|
||
|
}
|
||
|
});
|
||
|
}
|
||
|
break;
|
||
|
default:
|
||
|
if (async && callback) callback(Buffer.alloc(0), Utils.Errors.UNKNOWN_METHOD);
|
||
|
throw new Error(Utils.Errors.UNKNOWN_METHOD);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function compress(/*Boolean*/ async, /*Function*/ callback) {
|
||
|
if ((!uncompressedData || !uncompressedData.length) && Buffer.isBuffer(input)) {
|
||
|
// no data set or the data wasn't changed to require recompression
|
||
|
if (async && callback) callback(getCompressedDataFromZip());
|
||
|
return getCompressedDataFromZip();
|
||
|
}
|
||
|
|
||
|
if (uncompressedData.length && !_isDirectory) {
|
||
|
var compressedData;
|
||
|
// Local file header
|
||
|
switch (_entryHeader.method) {
|
||
|
case Utils.Constants.STORED:
|
||
|
_entryHeader.compressedSize = _entryHeader.size;
|
||
|
|
||
|
compressedData = Buffer.alloc(uncompressedData.length);
|
||
|
uncompressedData.copy(compressedData);
|
||
|
|
||
|
if (async && callback) callback(compressedData);
|
||
|
return compressedData;
|
||
|
default:
|
||
|
case Utils.Constants.DEFLATED:
|
||
|
var deflater = new Methods.Deflater(uncompressedData);
|
||
|
if (!async) {
|
||
|
var deflated = deflater.deflate();
|
||
|
_entryHeader.compressedSize = deflated.length;
|
||
|
return deflated;
|
||
|
} else {
|
||
|
deflater.deflateAsync(function (data) {
|
||
|
compressedData = Buffer.alloc(data.length);
|
||
|
_entryHeader.compressedSize = data.length;
|
||
|
data.copy(compressedData);
|
||
|
callback && callback(compressedData);
|
||
|
});
|
||
|
}
|
||
|
deflater = null;
|
||
|
break;
|
||
|
}
|
||
|
} else if (async && callback) {
|
||
|
callback(Buffer.alloc(0));
|
||
|
} else {
|
||
|
return Buffer.alloc(0);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function readUInt64LE(buffer, offset) {
|
||
|
return (buffer.readUInt32LE(offset + 4) << 4) + buffer.readUInt32LE(offset);
|
||
|
}
|
||
|
|
||
|
function parseExtra(data) {
|
||
|
var offset = 0;
|
||
|
var signature, size, part;
|
||
|
while (offset < data.length) {
|
||
|
signature = data.readUInt16LE(offset);
|
||
|
offset += 2;
|
||
|
size = data.readUInt16LE(offset);
|
||
|
offset += 2;
|
||
|
part = data.slice(offset, offset + size);
|
||
|
offset += size;
|
||
|
if (Constants.ID_ZIP64 === signature) {
|
||
|
parseZip64ExtendedInformation(part);
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
//Override header field values with values from the ZIP64 extra field
|
||
|
function parseZip64ExtendedInformation(data) {
|
||
|
var size, compressedSize, offset, diskNumStart;
|
||
|
|
||
|
if (data.length >= Constants.EF_ZIP64_SCOMP) {
|
||
|
size = readUInt64LE(data, Constants.EF_ZIP64_SUNCOMP);
|
||
|
if (_entryHeader.size === Constants.EF_ZIP64_OR_32) {
|
||
|
_entryHeader.size = size;
|
||
|
}
|
||
|
}
|
||
|
if (data.length >= Constants.EF_ZIP64_RHO) {
|
||
|
compressedSize = readUInt64LE(data, Constants.EF_ZIP64_SCOMP);
|
||
|
if (_entryHeader.compressedSize === Constants.EF_ZIP64_OR_32) {
|
||
|
_entryHeader.compressedSize = compressedSize;
|
||
|
}
|
||
|
}
|
||
|
if (data.length >= Constants.EF_ZIP64_DSN) {
|
||
|
offset = readUInt64LE(data, Constants.EF_ZIP64_RHO);
|
||
|
if (_entryHeader.offset === Constants.EF_ZIP64_OR_32) {
|
||
|
_entryHeader.offset = offset;
|
||
|
}
|
||
|
}
|
||
|
if (data.length >= Constants.EF_ZIP64_DSN + 4) {
|
||
|
diskNumStart = data.readUInt32LE(Constants.EF_ZIP64_DSN);
|
||
|
if (_entryHeader.diskNumStart === Constants.EF_ZIP64_OR_16) {
|
||
|
_entryHeader.diskNumStart = diskNumStart;
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
return {
|
||
|
get entryName() {
|
||
|
return _entryName.toString();
|
||
|
},
|
||
|
get rawEntryName() {
|
||
|
return _entryName;
|
||
|
},
|
||
|
set entryName(val) {
|
||
|
_entryName = Utils.toBuffer(val);
|
||
|
var lastChar = _entryName[_entryName.length - 1];
|
||
|
_isDirectory = lastChar === 47 || lastChar === 92;
|
||
|
_entryHeader.fileNameLength = _entryName.length;
|
||
|
},
|
||
|
|
||
|
get extra() {
|
||
|
return _extra;
|
||
|
},
|
||
|
set extra(val) {
|
||
|
_extra = val;
|
||
|
_entryHeader.extraLength = val.length;
|
||
|
parseExtra(val);
|
||
|
},
|
||
|
|
||
|
get comment() {
|
||
|
return _comment.toString();
|
||
|
},
|
||
|
set comment(val) {
|
||
|
_comment = Utils.toBuffer(val);
|
||
|
_entryHeader.commentLength = _comment.length;
|
||
|
},
|
||
|
|
||
|
get name() {
|
||
|
var n = _entryName.toString();
|
||
|
return _isDirectory
|
||
|
? n
|
||
|
.substr(n.length - 1)
|
||
|
.split("/")
|
||
|
.pop()
|
||
|
: n.split("/").pop();
|
||
|
},
|
||
|
get isDirectory() {
|
||
|
return _isDirectory;
|
||
|
},
|
||
|
|
||
|
getCompressedData: function () {
|
||
|
return compress(false, null);
|
||
|
},
|
||
|
|
||
|
getCompressedDataAsync: function (/*Function*/ callback) {
|
||
|
compress(true, callback);
|
||
|
},
|
||
|
|
||
|
setData: function (value) {
|
||
|
uncompressedData = Utils.toBuffer(value);
|
||
|
if (!_isDirectory && uncompressedData.length) {
|
||
|
_entryHeader.size = uncompressedData.length;
|
||
|
_entryHeader.method = Utils.Constants.DEFLATED;
|
||
|
_entryHeader.crc = Utils.crc32(value);
|
||
|
_entryHeader.changed = true;
|
||
|
} else {
|
||
|
// folders and blank files should be stored
|
||
|
_entryHeader.method = Utils.Constants.STORED;
|
||
|
}
|
||
|
},
|
||
|
|
||
|
getData: function (pass) {
|
||
|
if (_entryHeader.changed) {
|
||
|
return uncompressedData;
|
||
|
} else {
|
||
|
return decompress(false, null, pass);
|
||
|
}
|
||
|
},
|
||
|
|
||
|
getDataAsync: function (/*Function*/ callback, pass) {
|
||
|
if (_entryHeader.changed) {
|
||
|
callback(uncompressedData);
|
||
|
} else {
|
||
|
decompress(true, callback, pass);
|
||
|
}
|
||
|
},
|
||
|
|
||
|
set attr(attr) {
|
||
|
_entryHeader.attr = attr;
|
||
|
},
|
||
|
get attr() {
|
||
|
return _entryHeader.attr;
|
||
|
},
|
||
|
|
||
|
set header(/*Buffer*/ data) {
|
||
|
_entryHeader.loadFromBinary(data);
|
||
|
},
|
||
|
|
||
|
get header() {
|
||
|
return _entryHeader;
|
||
|
},
|
||
|
|
||
|
packHeader: function () {
|
||
|
// 1. create header (buffer)
|
||
|
var header = _entryHeader.entryHeaderToBinary();
|
||
|
var addpos = Utils.Constants.CENHDR;
|
||
|
// 2. add file name
|
||
|
_entryName.copy(header, addpos);
|
||
|
addpos += _entryName.length;
|
||
|
// 3. add extra data
|
||
|
if (_entryHeader.extraLength) {
|
||
|
_extra.copy(header, addpos);
|
||
|
addpos += _entryHeader.extraLength;
|
||
|
}
|
||
|
// 4. add file comment
|
||
|
if (_entryHeader.commentLength) {
|
||
|
_comment.copy(header, addpos);
|
||
|
}
|
||
|
return header;
|
||
|
},
|
||
|
|
||
|
toJSON: function () {
|
||
|
const bytes = function (nr) {
|
||
|
return "<" + ((nr && nr.length + " bytes buffer") || "null") + ">";
|
||
|
};
|
||
|
|
||
|
return {
|
||
|
entryName: this.entryName,
|
||
|
name: this.name,
|
||
|
comment: this.comment,
|
||
|
isDirectory: this.isDirectory,
|
||
|
header: _entryHeader.toJSON(),
|
||
|
compressedData: bytes(input),
|
||
|
data: bytes(uncompressedData)
|
||
|
};
|
||
|
},
|
||
|
|
||
|
toString: function () {
|
||
|
return JSON.stringify(this.toJSON(), null, "\t");
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 56333:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
const ZipEntry = __webpack_require__(47396);
|
||
|
const Headers = __webpack_require__(53854);
|
||
|
const Utils = __webpack_require__(85173);
|
||
|
|
||
|
module.exports = function (/*Buffer|null*/ inBuffer, /** object */ options) {
|
||
|
var entryList = [],
|
||
|
entryTable = {},
|
||
|
_comment = Buffer.alloc(0),
|
||
|
mainHeader = new Headers.MainHeader(),
|
||
|
loadedEntries = false;
|
||
|
|
||
|
// assign options
|
||
|
const opts = Object.assign(Object.create(null), options);
|
||
|
|
||
|
const { noSort } = opts;
|
||
|
|
||
|
if (inBuffer) {
|
||
|
// is a memory buffer
|
||
|
readMainHeader(opts.readEntries);
|
||
|
} else {
|
||
|
// none. is a new file
|
||
|
loadedEntries = true;
|
||
|
}
|
||
|
|
||
|
function iterateEntries(callback) {
|
||
|
const totalEntries = mainHeader.diskEntries; // total number of entries
|
||
|
let index = mainHeader.offset; // offset of first CEN header
|
||
|
|
||
|
for (let i = 0; i < totalEntries; i++) {
|
||
|
let tmp = index;
|
||
|
const entry = new ZipEntry(inBuffer);
|
||
|
|
||
|
entry.header = inBuffer.slice(tmp, (tmp += Utils.Constants.CENHDR));
|
||
|
entry.entryName = inBuffer.slice(tmp, (tmp += entry.header.fileNameLength));
|
||
|
|
||
|
index += entry.header.entryHeaderSize;
|
||
|
|
||
|
callback(entry);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function readEntries() {
|
||
|
loadedEntries = true;
|
||
|
entryTable = {};
|
||
|
entryList = new Array(mainHeader.diskEntries); // total number of entries
|
||
|
var index = mainHeader.offset; // offset of first CEN header
|
||
|
for (var i = 0; i < entryList.length; i++) {
|
||
|
var tmp = index,
|
||
|
entry = new ZipEntry(inBuffer);
|
||
|
entry.header = inBuffer.slice(tmp, (tmp += Utils.Constants.CENHDR));
|
||
|
|
||
|
entry.entryName = inBuffer.slice(tmp, (tmp += entry.header.fileNameLength));
|
||
|
|
||
|
if (entry.header.extraLength) {
|
||
|
entry.extra = inBuffer.slice(tmp, (tmp += entry.header.extraLength));
|
||
|
}
|
||
|
|
||
|
if (entry.header.commentLength) entry.comment = inBuffer.slice(tmp, tmp + entry.header.commentLength);
|
||
|
|
||
|
index += entry.header.entryHeaderSize;
|
||
|
|
||
|
entryList[i] = entry;
|
||
|
entryTable[entry.entryName] = entry;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function readMainHeader(/*Boolean*/ readNow) {
|
||
|
var i = inBuffer.length - Utils.Constants.ENDHDR, // END header size
|
||
|
max = Math.max(0, i - 0xffff), // 0xFFFF is the max zip file comment length
|
||
|
n = max,
|
||
|
endStart = inBuffer.length,
|
||
|
endOffset = -1, // Start offset of the END header
|
||
|
commentEnd = 0;
|
||
|
|
||
|
for (i; i >= n; i--) {
|
||
|
if (inBuffer[i] !== 0x50) continue; // quick check that the byte is 'P'
|
||
|
if (inBuffer.readUInt32LE(i) === Utils.Constants.ENDSIG) {
|
||
|
// "PK\005\006"
|
||
|
endOffset = i;
|
||
|
commentEnd = i;
|
||
|
endStart = i + Utils.Constants.ENDHDR;
|
||
|
// We already found a regular signature, let's look just a bit further to check if there's any zip64 signature
|
||
|
n = i - Utils.Constants.END64HDR;
|
||
|
continue;
|
||
|
}
|
||
|
|
||
|
if (inBuffer.readUInt32LE(i) === Utils.Constants.END64SIG) {
|
||
|
// Found a zip64 signature, let's continue reading the whole zip64 record
|
||
|
n = max;
|
||
|
continue;
|
||
|
}
|
||
|
|
||
|
if (inBuffer.readUInt32LE(i) === Utils.Constants.ZIP64SIG) {
|
||
|
// Found the zip64 record, let's determine it's size
|
||
|
endOffset = i;
|
||
|
endStart = i + Utils.readBigUInt64LE(inBuffer, i + Utils.Constants.ZIP64SIZE) + Utils.Constants.ZIP64LEAD;
|
||
|
break;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
if (!~endOffset) throw new Error(Utils.Errors.INVALID_FORMAT);
|
||
|
|
||
|
mainHeader.loadFromBinary(inBuffer.slice(endOffset, endStart));
|
||
|
if (mainHeader.commentLength) {
|
||
|
_comment = inBuffer.slice(commentEnd + Utils.Constants.ENDHDR);
|
||
|
}
|
||
|
if (readNow) readEntries();
|
||
|
}
|
||
|
|
||
|
function sortEntries() {
|
||
|
if (entryList.length > 1 && !noSort) {
|
||
|
entryList.sort((a, b) => a.entryName.toLowerCase().localeCompare(b.entryName.toLowerCase()));
|
||
|
}
|
||
|
}
|
||
|
|
||
|
return {
|
||
|
/**
|
||
|
* Returns an array of ZipEntry objects existent in the current opened archive
|
||
|
* @return Array
|
||
|
*/
|
||
|
get entries() {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
return entryList;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Archive comment
|
||
|
* @return {String}
|
||
|
*/
|
||
|
get comment() {
|
||
|
return _comment.toString();
|
||
|
},
|
||
|
set comment(val) {
|
||
|
_comment = Utils.toBuffer(val);
|
||
|
mainHeader.commentLength = _comment.length;
|
||
|
},
|
||
|
|
||
|
getEntryCount: function () {
|
||
|
if (!loadedEntries) {
|
||
|
return mainHeader.diskEntries;
|
||
|
}
|
||
|
|
||
|
return entryList.length;
|
||
|
},
|
||
|
|
||
|
forEach: function (callback) {
|
||
|
if (!loadedEntries) {
|
||
|
iterateEntries(callback);
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
entryList.forEach(callback);
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns a reference to the entry with the given name or null if entry is inexistent
|
||
|
*
|
||
|
* @param entryName
|
||
|
* @return ZipEntry
|
||
|
*/
|
||
|
getEntry: function (/*String*/ entryName) {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
return entryTable[entryName] || null;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Adds the given entry to the entry list
|
||
|
*
|
||
|
* @param entry
|
||
|
*/
|
||
|
setEntry: function (/*ZipEntry*/ entry) {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
entryList.push(entry);
|
||
|
entryTable[entry.entryName] = entry;
|
||
|
mainHeader.totalEntries = entryList.length;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Removes the entry with the given name from the entry list.
|
||
|
*
|
||
|
* If the entry is a directory, then all nested files and directories will be removed
|
||
|
* @param entryName
|
||
|
*/
|
||
|
deleteEntry: function (/*String*/ entryName) {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
var entry = entryTable[entryName];
|
||
|
if (entry && entry.isDirectory) {
|
||
|
var _self = this;
|
||
|
this.getEntryChildren(entry).forEach(function (child) {
|
||
|
if (child.entryName !== entryName) {
|
||
|
_self.deleteEntry(child.entryName);
|
||
|
}
|
||
|
});
|
||
|
}
|
||
|
entryList.splice(entryList.indexOf(entry), 1);
|
||
|
delete entryTable[entryName];
|
||
|
mainHeader.totalEntries = entryList.length;
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Iterates and returns all nested files and directories of the given entry
|
||
|
*
|
||
|
* @param entry
|
||
|
* @return Array
|
||
|
*/
|
||
|
getEntryChildren: function (/*ZipEntry*/ entry) {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
if (entry && entry.isDirectory) {
|
||
|
const list = [];
|
||
|
const name = entry.entryName;
|
||
|
const len = name.length;
|
||
|
|
||
|
entryList.forEach(function (zipEntry) {
|
||
|
if (zipEntry.entryName.substr(0, len) === name) {
|
||
|
list.push(zipEntry);
|
||
|
}
|
||
|
});
|
||
|
return list;
|
||
|
}
|
||
|
return [];
|
||
|
},
|
||
|
|
||
|
/**
|
||
|
* Returns the zip file
|
||
|
*
|
||
|
* @return Buffer
|
||
|
*/
|
||
|
compressToBuffer: function () {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
sortEntries();
|
||
|
|
||
|
const dataBlock = [];
|
||
|
const entryHeaders = [];
|
||
|
let totalSize = 0;
|
||
|
let dindex = 0;
|
||
|
|
||
|
mainHeader.size = 0;
|
||
|
mainHeader.offset = 0;
|
||
|
|
||
|
for (const entry of entryList) {
|
||
|
// compress data and set local and entry header accordingly. Reason why is called first
|
||
|
const compressedData = entry.getCompressedData();
|
||
|
// 1. construct data header
|
||
|
entry.header.offset = dindex;
|
||
|
const dataHeader = entry.header.dataHeaderToBinary();
|
||
|
const entryNameLen = entry.rawEntryName.length;
|
||
|
// 1.2. postheader - data after data header
|
||
|
const postHeader = Buffer.alloc(entryNameLen + entry.extra.length);
|
||
|
entry.rawEntryName.copy(postHeader, 0);
|
||
|
postHeader.copy(entry.extra, entryNameLen);
|
||
|
|
||
|
// 2. offsets
|
||
|
const dataLength = dataHeader.length + postHeader.length + compressedData.length;
|
||
|
dindex += dataLength;
|
||
|
|
||
|
// 3. store values in sequence
|
||
|
dataBlock.push(dataHeader);
|
||
|
dataBlock.push(postHeader);
|
||
|
dataBlock.push(compressedData);
|
||
|
|
||
|
// 4. construct entry header
|
||
|
const entryHeader = entry.packHeader();
|
||
|
entryHeaders.push(entryHeader);
|
||
|
// 5. update main header
|
||
|
mainHeader.size += entryHeader.length;
|
||
|
totalSize += dataLength + entryHeader.length;
|
||
|
}
|
||
|
|
||
|
totalSize += mainHeader.mainHeaderSize; // also includes zip file comment length
|
||
|
// point to end of data and beginning of central directory first record
|
||
|
mainHeader.offset = dindex;
|
||
|
|
||
|
dindex = 0;
|
||
|
const outBuffer = Buffer.alloc(totalSize);
|
||
|
// write data blocks
|
||
|
for (const content of dataBlock) {
|
||
|
content.copy(outBuffer, dindex);
|
||
|
dindex += content.length;
|
||
|
}
|
||
|
|
||
|
// write central directory entries
|
||
|
for (const content of entryHeaders) {
|
||
|
content.copy(outBuffer, dindex);
|
||
|
dindex += content.length;
|
||
|
}
|
||
|
|
||
|
// write main header
|
||
|
const mh = mainHeader.toBinary();
|
||
|
if (_comment) {
|
||
|
_comment.copy(mh, Utils.Constants.ENDHDR); // add zip file comment
|
||
|
}
|
||
|
mh.copy(outBuffer, dindex);
|
||
|
|
||
|
return outBuffer;
|
||
|
},
|
||
|
|
||
|
toAsyncBuffer: function (/*Function*/ onSuccess, /*Function*/ onFail, /*Function*/ onItemStart, /*Function*/ onItemEnd) {
|
||
|
try {
|
||
|
if (!loadedEntries) {
|
||
|
readEntries();
|
||
|
}
|
||
|
sortEntries();
|
||
|
|
||
|
const dataBlock = [];
|
||
|
const entryHeaders = [];
|
||
|
let totalSize = 0;
|
||
|
let dindex = 0;
|
||
|
|
||
|
mainHeader.size = 0;
|
||
|
mainHeader.offset = 0;
|
||
|
|
||
|
const compress2Buffer = function (entryLists) {
|
||
|
if (entryLists.length) {
|
||
|
const entry = entryLists.pop();
|
||
|
const name = entry.entryName + entry.extra.toString();
|
||
|
if (onItemStart) onItemStart(name);
|
||
|
entry.getCompressedDataAsync(function (compressedData) {
|
||
|
if (onItemEnd) onItemEnd(name);
|
||
|
|
||
|
entry.header.offset = dindex;
|
||
|
// data header
|
||
|
const dataHeader = entry.header.dataHeaderToBinary();
|
||
|
const postHeader = Buffer.alloc(name.length, name);
|
||
|
const dataLength = dataHeader.length + postHeader.length + compressedData.length;
|
||
|
|
||
|
dindex += dataLength;
|
||
|
|
||
|
dataBlock.push(dataHeader);
|
||
|
dataBlock.push(postHeader);
|
||
|
dataBlock.push(compressedData);
|
||
|
|
||
|
const entryHeader = entry.packHeader();
|
||
|
entryHeaders.push(entryHeader);
|
||
|
mainHeader.size += entryHeader.length;
|
||
|
totalSize += dataLength + entryHeader.length;
|
||
|
|
||
|
compress2Buffer(entryLists);
|
||
|
});
|
||
|
} else {
|
||
|
totalSize += mainHeader.mainHeaderSize; // also includes zip file comment length
|
||
|
// point to end of data and beginning of central directory first record
|
||
|
mainHeader.offset = dindex;
|
||
|
|
||
|
dindex = 0;
|
||
|
const outBuffer = Buffer.alloc(totalSize);
|
||
|
dataBlock.forEach(function (content) {
|
||
|
content.copy(outBuffer, dindex); // write data blocks
|
||
|
dindex += content.length;
|
||
|
});
|
||
|
entryHeaders.forEach(function (content) {
|
||
|
content.copy(outBuffer, dindex); // write central directory entries
|
||
|
dindex += content.length;
|
||
|
});
|
||
|
|
||
|
const mh = mainHeader.toBinary();
|
||
|
if (_comment) {
|
||
|
_comment.copy(mh, Utils.Constants.ENDHDR); // add zip file comment
|
||
|
}
|
||
|
|
||
|
mh.copy(outBuffer, dindex); // write main header
|
||
|
|
||
|
onSuccess(outBuffer);
|
||
|
}
|
||
|
};
|
||
|
|
||
|
compress2Buffer(entryList);
|
||
|
} catch (e) {
|
||
|
onFail(e);
|
||
|
}
|
||
|
}
|
||
|
};
|
||
|
};
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 9668:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
const { Buffer } = __webpack_require__(64293)
|
||
|
const symbol = Symbol.for('BufferList')
|
||
|
|
||
|
function BufferList (buf) {
|
||
|
if (!(this instanceof BufferList)) {
|
||
|
return new BufferList(buf)
|
||
|
}
|
||
|
|
||
|
BufferList._init.call(this, buf)
|
||
|
}
|
||
|
|
||
|
BufferList._init = function _init (buf) {
|
||
|
Object.defineProperty(this, symbol, { value: true })
|
||
|
|
||
|
this._bufs = []
|
||
|
this.length = 0
|
||
|
|
||
|
if (buf) {
|
||
|
this.append(buf)
|
||
|
}
|
||
|
}
|
||
|
|
||
|
BufferList.prototype._new = function _new (buf) {
|
||
|
return new BufferList(buf)
|
||
|
}
|
||
|
|
||
|
BufferList.prototype._offset = function _offset (offset) {
|
||
|
if (offset === 0) {
|
||
|
return [0, 0]
|
||
|
}
|
||
|
|
||
|
let tot = 0
|
||
|
|
||
|
for (let i = 0; i < this._bufs.length; i++) {
|
||
|
const _t = tot + this._bufs[i].length
|
||
|
if (offset < _t || i === this._bufs.length - 1) {
|
||
|
return [i, offset - tot]
|
||
|
}
|
||
|
tot = _t
|
||
|
}
|
||
|
}
|
||
|
|
||
|
BufferList.prototype._reverseOffset = function (blOffset) {
|
||
|
const bufferId = blOffset[0]
|
||
|
let offset = blOffset[1]
|
||
|
|
||
|
for (let i = 0; i < bufferId; i++) {
|
||
|
offset += this._bufs[i].length
|
||
|
}
|
||
|
|
||
|
return offset
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.get = function get (index) {
|
||
|
if (index > this.length || index < 0) {
|
||
|
return undefined
|
||
|
}
|
||
|
|
||
|
const offset = this._offset(index)
|
||
|
|
||
|
return this._bufs[offset[0]][offset[1]]
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.slice = function slice (start, end) {
|
||
|
if (typeof start === 'number' && start < 0) {
|
||
|
start += this.length
|
||
|
}
|
||
|
|
||
|
if (typeof end === 'number' && end < 0) {
|
||
|
end += this.length
|
||
|
}
|
||
|
|
||
|
return this.copy(null, 0, start, end)
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.copy = function copy (dst, dstStart, srcStart, srcEnd) {
|
||
|
if (typeof srcStart !== 'number' || srcStart < 0) {
|
||
|
srcStart = 0
|
||
|
}
|
||
|
|
||
|
if (typeof srcEnd !== 'number' || srcEnd > this.length) {
|
||
|
srcEnd = this.length
|
||
|
}
|
||
|
|
||
|
if (srcStart >= this.length) {
|
||
|
return dst || Buffer.alloc(0)
|
||
|
}
|
||
|
|
||
|
if (srcEnd <= 0) {
|
||
|
return dst || Buffer.alloc(0)
|
||
|
}
|
||
|
|
||
|
const copy = !!dst
|
||
|
const off = this._offset(srcStart)
|
||
|
const len = srcEnd - srcStart
|
||
|
let bytes = len
|
||
|
let bufoff = (copy && dstStart) || 0
|
||
|
let start = off[1]
|
||
|
|
||
|
// copy/slice everything
|
||
|
if (srcStart === 0 && srcEnd === this.length) {
|
||
|
if (!copy) {
|
||
|
// slice, but full concat if multiple buffers
|
||
|
return this._bufs.length === 1
|
||
|
? this._bufs[0]
|
||
|
: Buffer.concat(this._bufs, this.length)
|
||
|
}
|
||
|
|
||
|
// copy, need to copy individual buffers
|
||
|
for (let i = 0; i < this._bufs.length; i++) {
|
||
|
this._bufs[i].copy(dst, bufoff)
|
||
|
bufoff += this._bufs[i].length
|
||
|
}
|
||
|
|
||
|
return dst
|
||
|
}
|
||
|
|
||
|
// easy, cheap case where it's a subset of one of the buffers
|
||
|
if (bytes <= this._bufs[off[0]].length - start) {
|
||
|
return copy
|
||
|
? this._bufs[off[0]].copy(dst, dstStart, start, start + bytes)
|
||
|
: this._bufs[off[0]].slice(start, start + bytes)
|
||
|
}
|
||
|
|
||
|
if (!copy) {
|
||
|
// a slice, we need something to copy in to
|
||
|
dst = Buffer.allocUnsafe(len)
|
||
|
}
|
||
|
|
||
|
for (let i = off[0]; i < this._bufs.length; i++) {
|
||
|
const l = this._bufs[i].length - start
|
||
|
|
||
|
if (bytes > l) {
|
||
|
this._bufs[i].copy(dst, bufoff, start)
|
||
|
bufoff += l
|
||
|
} else {
|
||
|
this._bufs[i].copy(dst, bufoff, start, start + bytes)
|
||
|
bufoff += l
|
||
|
break
|
||
|
}
|
||
|
|
||
|
bytes -= l
|
||
|
|
||
|
if (start) {
|
||
|
start = 0
|
||
|
}
|
||
|
}
|
||
|
|
||
|
// safeguard so that we don't return uninitialized memory
|
||
|
if (dst.length > bufoff) return dst.slice(0, bufoff)
|
||
|
|
||
|
return dst
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.shallowSlice = function shallowSlice (start, end) {
|
||
|
start = start || 0
|
||
|
end = typeof end !== 'number' ? this.length : end
|
||
|
|
||
|
if (start < 0) {
|
||
|
start += this.length
|
||
|
}
|
||
|
|
||
|
if (end < 0) {
|
||
|
end += this.length
|
||
|
}
|
||
|
|
||
|
if (start === end) {
|
||
|
return this._new()
|
||
|
}
|
||
|
|
||
|
const startOffset = this._offset(start)
|
||
|
const endOffset = this._offset(end)
|
||
|
const buffers = this._bufs.slice(startOffset[0], endOffset[0] + 1)
|
||
|
|
||
|
if (endOffset[1] === 0) {
|
||
|
buffers.pop()
|
||
|
} else {
|
||
|
buffers[buffers.length - 1] = buffers[buffers.length - 1].slice(0, endOffset[1])
|
||
|
}
|
||
|
|
||
|
if (startOffset[1] !== 0) {
|
||
|
buffers[0] = buffers[0].slice(startOffset[1])
|
||
|
}
|
||
|
|
||
|
return this._new(buffers)
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.toString = function toString (encoding, start, end) {
|
||
|
return this.slice(start, end).toString(encoding)
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.consume = function consume (bytes) {
|
||
|
// first, normalize the argument, in accordance with how Buffer does it
|
||
|
bytes = Math.trunc(bytes)
|
||
|
// do nothing if not a positive number
|
||
|
if (Number.isNaN(bytes) || bytes <= 0) return this
|
||
|
|
||
|
while (this._bufs.length) {
|
||
|
if (bytes >= this._bufs[0].length) {
|
||
|
bytes -= this._bufs[0].length
|
||
|
this.length -= this._bufs[0].length
|
||
|
this._bufs.shift()
|
||
|
} else {
|
||
|
this._bufs[0] = this._bufs[0].slice(bytes)
|
||
|
this.length -= bytes
|
||
|
break
|
||
|
}
|
||
|
}
|
||
|
|
||
|
return this
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.duplicate = function duplicate () {
|
||
|
const copy = this._new()
|
||
|
|
||
|
for (let i = 0; i < this._bufs.length; i++) {
|
||
|
copy.append(this._bufs[i])
|
||
|
}
|
||
|
|
||
|
return copy
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.append = function append (buf) {
|
||
|
if (buf == null) {
|
||
|
return this
|
||
|
}
|
||
|
|
||
|
if (buf.buffer) {
|
||
|
// append a view of the underlying ArrayBuffer
|
||
|
this._appendBuffer(Buffer.from(buf.buffer, buf.byteOffset, buf.byteLength))
|
||
|
} else if (Array.isArray(buf)) {
|
||
|
for (let i = 0; i < buf.length; i++) {
|
||
|
this.append(buf[i])
|
||
|
}
|
||
|
} else if (this._isBufferList(buf)) {
|
||
|
// unwrap argument into individual BufferLists
|
||
|
for (let i = 0; i < buf._bufs.length; i++) {
|
||
|
this.append(buf._bufs[i])
|
||
|
}
|
||
|
} else {
|
||
|
// coerce number arguments to strings, since Buffer(number) does
|
||
|
// uninitialized memory allocation
|
||
|
if (typeof buf === 'number') {
|
||
|
buf = buf.toString()
|
||
|
}
|
||
|
|
||
|
this._appendBuffer(Buffer.from(buf))
|
||
|
}
|
||
|
|
||
|
return this
|
||
|
}
|
||
|
|
||
|
BufferList.prototype._appendBuffer = function appendBuffer (buf) {
|
||
|
this._bufs.push(buf)
|
||
|
this.length += buf.length
|
||
|
}
|
||
|
|
||
|
BufferList.prototype.indexOf = function (search, offset, encoding) {
|
||
|
if (encoding === undefined && typeof offset === 'string') {
|
||
|
encoding = offset
|
||
|
offset = undefined
|
||
|
}
|
||
|
|
||
|
if (typeof search === 'function' || Array.isArray(search)) {
|
||
|
throw new TypeError('The "value" argument must be one of type string, Buffer, BufferList, or Uint8Array.')
|
||
|
} else if (typeof search === 'number') {
|
||
|
search = Buffer.from([search])
|
||
|
} else if (typeof search === 'string') {
|
||
|
search = Buffer.from(search, encoding)
|
||
|
} else if (this._isBufferList(search)) {
|
||
|
search = search.slice()
|
||
|
} else if (Array.isArray(search.buffer)) {
|
||
|
search = Buffer.from(search.buffer, search.byteOffset, search.byteLength)
|
||
|
} else if (!Buffer.isBuffer(search)) {
|
||
|
search = Buffer.from(search)
|
||
|
}
|
||
|
|
||
|
offset = Number(offset || 0)
|
||
|
|
||
|
if (isNaN(offset)) {
|
||
|
offset = 0
|
||
|
}
|
||
|
|
||
|
if (offset < 0) {
|
||
|
offset = this.length + offset
|
||
|
}
|
||
|
|
||
|
if (offset < 0) {
|
||
|
offset = 0
|
||
|
}
|
||
|
|
||
|
if (search.length === 0) {
|
||
|
return offset > this.length ? this.length : offset
|
||
|
}
|
||
|
|
||
|
const blOffset = this._offset(offset)
|
||
|
let blIndex = blOffset[0] // index of which internal buffer we're working on
|
||
|
let buffOffset = blOffset[1] // offset of the internal buffer we're working on
|
||
|
|
||
|
// scan over each buffer
|
||
|
for (; blIndex < this._bufs.length; blIndex++) {
|
||
|
const buff = this._bufs[blIndex]
|
||
|
|
||
|
while (buffOffset < buff.length) {
|
||
|
const availableWindow = buff.length - buffOffset
|
||
|
|
||
|
if (availableWindow >= search.length) {
|
||
|
const nativeSearchResult = buff.indexOf(search, buffOffset)
|
||
|
|
||
|
if (nativeSearchResult !== -1) {
|
||
|
return this._reverseOffset([blIndex, nativeSearchResult])
|
||
|
}
|
||
|
|
||
|
buffOffset = buff.length - search.length + 1 // end of native search window
|
||
|
} else {
|
||
|
const revOffset = this._reverseOffset([blIndex, buffOffset])
|
||
|
|
||
|
if (this._match(revOffset, search)) {
|
||
|
return revOffset
|
||
|
}
|
||
|
|
||
|
buffOffset++
|
||
|
}
|
||
|
}
|
||
|
|
||
|
buffOffset = 0
|
||
|
}
|
||
|
|
||
|
return -1
|
||
|
}
|
||
|
|
||
|
BufferList.prototype._match = function (offset, search) {
|
||
|
if (this.length - offset < search.length) {
|
||
|
return false
|
||
|
}
|
||
|
|
||
|
for (let searchOffset = 0; searchOffset < search.length; searchOffset++) {
|
||
|
if (this.get(offset + searchOffset) !== search[searchOffset]) {
|
||
|
return false
|
||
|
}
|
||
|
}
|
||
|
return true
|
||
|
}
|
||
|
|
||
|
;(function () {
|
||
|
const methods = {
|
||
|
readDoubleBE: 8,
|
||
|
readDoubleLE: 8,
|
||
|
readFloatBE: 4,
|
||
|
readFloatLE: 4,
|
||
|
readInt32BE: 4,
|
||
|
readInt32LE: 4,
|
||
|
readUInt32BE: 4,
|
||
|
readUInt32LE: 4,
|
||
|
readInt16BE: 2,
|
||
|
readInt16LE: 2,
|
||
|
readUInt16BE: 2,
|
||
|
readUInt16LE: 2,
|
||
|
readInt8: 1,
|
||
|
readUInt8: 1,
|
||
|
readIntBE: null,
|
||
|
readIntLE: null,
|
||
|
readUIntBE: null,
|
||
|
readUIntLE: null
|
||
|
}
|
||
|
|
||
|
for (const m in methods) {
|
||
|
(function (m) {
|
||
|
if (methods[m] === null) {
|
||
|
BufferList.prototype[m] = function (offset, byteLength) {
|
||
|
return this.slice(offset, offset + byteLength)[m](0, byteLength)
|
||
|
}
|
||
|
} else {
|
||
|
BufferList.prototype[m] = function (offset = 0) {
|
||
|
return this.slice(offset, offset + methods[m])[m](0)
|
||
|
}
|
||
|
}
|
||
|
}(m))
|
||
|
}
|
||
|
}())
|
||
|
|
||
|
// Used internally by the class and also as an indicator of this object being
|
||
|
// a `BufferList`. It's not possible to use `instanceof BufferList` in a browser
|
||
|
// environment because there could be multiple different copies of the
|
||
|
// BufferList class and some `BufferList`s might be `BufferList`s.
|
||
|
BufferList.prototype._isBufferList = function _isBufferList (b) {
|
||
|
return b instanceof BufferList || BufferList.isBufferList(b)
|
||
|
}
|
||
|
|
||
|
BufferList.isBufferList = function isBufferList (b) {
|
||
|
return b != null && b[symbol]
|
||
|
}
|
||
|
|
||
|
module.exports = BufferList
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 10022:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
const DuplexStream = __webpack_require__(11451).Duplex
|
||
|
const inherits = __webpack_require__(94378)
|
||
|
const BufferList = __webpack_require__(9668)
|
||
|
|
||
|
function BufferListStream (callback) {
|
||
|
if (!(this instanceof BufferListStream)) {
|
||
|
return new BufferListStream(callback)
|
||
|
}
|
||
|
|
||
|
if (typeof callback === 'function') {
|
||
|
this._callback = callback
|
||
|
|
||
|
const piper = function piper (err) {
|
||
|
if (this._callback) {
|
||
|
this._callback(err)
|
||
|
this._callback = null
|
||
|
}
|
||
|
}.bind(this)
|
||
|
|
||
|
this.on('pipe', function onPipe (src) {
|
||
|
src.on('error', piper)
|
||
|
})
|
||
|
this.on('unpipe', function onUnpipe (src) {
|
||
|
src.removeListener('error', piper)
|
||
|
})
|
||
|
|
||
|
callback = null
|
||
|
}
|
||
|
|
||
|
BufferList._init.call(this, callback)
|
||
|
DuplexStream.call(this)
|
||
|
}
|
||
|
|
||
|
inherits(BufferListStream, DuplexStream)
|
||
|
Object.assign(BufferListStream.prototype, BufferList.prototype)
|
||
|
|
||
|
BufferListStream.prototype._new = function _new (callback) {
|
||
|
return new BufferListStream(callback)
|
||
|
}
|
||
|
|
||
|
BufferListStream.prototype._write = function _write (buf, encoding, callback) {
|
||
|
this._appendBuffer(buf)
|
||
|
|
||
|
if (typeof callback === 'function') {
|
||
|
callback()
|
||
|
}
|
||
|
}
|
||
|
|
||
|
BufferListStream.prototype._read = function _read (size) {
|
||
|
if (!this.length) {
|
||
|
return this.push(null)
|
||
|
}
|
||
|
|
||
|
size = Math.min(size, this.length)
|
||
|
this.push(this.slice(0, size))
|
||
|
this.consume(size)
|
||
|
}
|
||
|
|
||
|
BufferListStream.prototype.end = function end (chunk) {
|
||
|
DuplexStream.prototype.end.call(this, chunk)
|
||
|
|
||
|
if (this._callback) {
|
||
|
this._callback(null, this.slice())
|
||
|
this._callback = null
|
||
|
}
|
||
|
}
|
||
|
|
||
|
BufferListStream.prototype._destroy = function _destroy (err, cb) {
|
||
|
this._bufs.length = 0
|
||
|
this.length = 0
|
||
|
cb(err)
|
||
|
}
|
||
|
|
||
|
BufferListStream.prototype._isBufferList = function _isBufferList (b) {
|
||
|
return b instanceof BufferListStream || b instanceof BufferList || BufferListStream.isBufferList(b)
|
||
|
}
|
||
|
|
||
|
BufferListStream.isBufferList = BufferList.isBufferList
|
||
|
|
||
|
module.exports = BufferListStream
|
||
|
module.exports.BufferListStream = BufferListStream
|
||
|
module.exports.BufferList = BufferList
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 94378:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
try {
|
||
|
var util = __webpack_require__(31669);
|
||
|
/* istanbul ignore next */
|
||
|
if (typeof util.inherits !== 'function') throw '';
|
||
|
module.exports = util.inherits;
|
||
|
} catch (e) {
|
||
|
/* istanbul ignore next */
|
||
|
module.exports = __webpack_require__(35717);
|
||
|
}
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 35717:
|
||
|
/***/ ((module) => {
|
||
|
|
||
|
if (typeof Object.create === 'function') {
|
||
|
// implementation from standard node.js 'util' module
|
||
|
module.exports = function inherits(ctor, superCtor) {
|
||
|
if (superCtor) {
|
||
|
ctor.super_ = superCtor
|
||
|
ctor.prototype = Object.create(superCtor.prototype, {
|
||
|
constructor: {
|
||
|
value: ctor,
|
||
|
enumerable: false,
|
||
|
writable: true,
|
||
|
configurable: true
|
||
|
}
|
||
|
})
|
||
|
}
|
||
|
};
|
||
|
} else {
|
||
|
// old school shim for old browsers
|
||
|
module.exports = function inherits(ctor, superCtor) {
|
||
|
if (superCtor) {
|
||
|
ctor.super_ = superCtor
|
||
|
var TempCtor = function () {}
|
||
|
TempCtor.prototype = superCtor.prototype
|
||
|
ctor.prototype = new TempCtor()
|
||
|
ctor.prototype.constructor = ctor
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 4012:
|
||
|
/***/ ((module) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
const codes = {};
|
||
|
|
||
|
function createErrorType(code, message, Base) {
|
||
|
if (!Base) {
|
||
|
Base = Error
|
||
|
}
|
||
|
|
||
|
function getMessage (arg1, arg2, arg3) {
|
||
|
if (typeof message === 'string') {
|
||
|
return message
|
||
|
} else {
|
||
|
return message(arg1, arg2, arg3)
|
||
|
}
|
||
|
}
|
||
|
|
||
|
class NodeError extends Base {
|
||
|
constructor (arg1, arg2, arg3) {
|
||
|
super(getMessage(arg1, arg2, arg3));
|
||
|
}
|
||
|
}
|
||
|
|
||
|
NodeError.prototype.name = Base.name;
|
||
|
NodeError.prototype.code = code;
|
||
|
|
||
|
codes[code] = NodeError;
|
||
|
}
|
||
|
|
||
|
// https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js
|
||
|
function oneOf(expected, thing) {
|
||
|
if (Array.isArray(expected)) {
|
||
|
const len = expected.length;
|
||
|
expected = expected.map((i) => String(i));
|
||
|
if (len > 2) {
|
||
|
return `one of ${thing} ${expected.slice(0, len - 1).join(', ')}, or ` +
|
||
|
expected[len - 1];
|
||
|
} else if (len === 2) {
|
||
|
return `one of ${thing} ${expected[0]} or ${expected[1]}`;
|
||
|
} else {
|
||
|
return `of ${thing} ${expected[0]}`;
|
||
|
}
|
||
|
} else {
|
||
|
return `of ${thing} ${String(expected)}`;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith
|
||
|
function startsWith(str, search, pos) {
|
||
|
return str.substr(!pos || pos < 0 ? 0 : +pos, search.length) === search;
|
||
|
}
|
||
|
|
||
|
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith
|
||
|
function endsWith(str, search, this_len) {
|
||
|
if (this_len === undefined || this_len > str.length) {
|
||
|
this_len = str.length;
|
||
|
}
|
||
|
return str.substring(this_len - search.length, this_len) === search;
|
||
|
}
|
||
|
|
||
|
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes
|
||
|
function includes(str, search, start) {
|
||
|
if (typeof start !== 'number') {
|
||
|
start = 0;
|
||
|
}
|
||
|
|
||
|
if (start + search.length > str.length) {
|
||
|
return false;
|
||
|
} else {
|
||
|
return str.indexOf(search, start) !== -1;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) {
|
||
|
return 'The value "' + value + '" is invalid for option "' + name + '"'
|
||
|
}, TypeError);
|
||
|
createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) {
|
||
|
// determiner: 'must be' or 'must not be'
|
||
|
let determiner;
|
||
|
if (typeof expected === 'string' && startsWith(expected, 'not ')) {
|
||
|
determiner = 'must not be';
|
||
|
expected = expected.replace(/^not /, '');
|
||
|
} else {
|
||
|
determiner = 'must be';
|
||
|
}
|
||
|
|
||
|
let msg;
|
||
|
if (endsWith(name, ' argument')) {
|
||
|
// For cases like 'first argument'
|
||
|
msg = `The ${name} ${determiner} ${oneOf(expected, 'type')}`;
|
||
|
} else {
|
||
|
const type = includes(name, '.') ? 'property' : 'argument';
|
||
|
msg = `The "${name}" ${type} ${determiner} ${oneOf(expected, 'type')}`;
|
||
|
}
|
||
|
|
||
|
msg += `. Received type ${typeof actual}`;
|
||
|
return msg;
|
||
|
}, TypeError);
|
||
|
createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF');
|
||
|
createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) {
|
||
|
return 'The ' + name + ' method is not implemented'
|
||
|
});
|
||
|
createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close');
|
||
|
createErrorType('ERR_STREAM_DESTROYED', function (name) {
|
||
|
return 'Cannot call ' + name + ' after a stream was destroyed';
|
||
|
});
|
||
|
createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times');
|
||
|
createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable');
|
||
|
createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end');
|
||
|
createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError);
|
||
|
createErrorType('ERR_UNKNOWN_ENCODING', function (arg) {
|
||
|
return 'Unknown encoding: ' + arg
|
||
|
}, TypeError);
|
||
|
createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event');
|
||
|
|
||
|
module.exports.q = codes;
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 56753:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
// a duplex stream is just a stream that is both readable and writable.
|
||
|
// Since JS doesn't have multiple prototypal inheritance, this class
|
||
|
// prototypally inherits from Readable, and then parasitically from
|
||
|
// Writable.
|
||
|
|
||
|
/*<replacement>*/
|
||
|
|
||
|
var objectKeys = Object.keys || function (obj) {
|
||
|
var keys = [];
|
||
|
|
||
|
for (var key in obj) {
|
||
|
keys.push(key);
|
||
|
}
|
||
|
|
||
|
return keys;
|
||
|
};
|
||
|
/*</replacement>*/
|
||
|
|
||
|
|
||
|
module.exports = Duplex;
|
||
|
|
||
|
var Readable = __webpack_require__(79481);
|
||
|
|
||
|
var Writable = __webpack_require__(64229);
|
||
|
|
||
|
__webpack_require__(94378)(Duplex, Readable);
|
||
|
|
||
|
{
|
||
|
// Allow the keys array to be GC'ed.
|
||
|
var keys = objectKeys(Writable.prototype);
|
||
|
|
||
|
for (var v = 0; v < keys.length; v++) {
|
||
|
var method = keys[v];
|
||
|
if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method];
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function Duplex(options) {
|
||
|
if (!(this instanceof Duplex)) return new Duplex(options);
|
||
|
Readable.call(this, options);
|
||
|
Writable.call(this, options);
|
||
|
this.allowHalfOpen = true;
|
||
|
|
||
|
if (options) {
|
||
|
if (options.readable === false) this.readable = false;
|
||
|
if (options.writable === false) this.writable = false;
|
||
|
|
||
|
if (options.allowHalfOpen === false) {
|
||
|
this.allowHalfOpen = false;
|
||
|
this.once('end', onend);
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._writableState.highWaterMark;
|
||
|
}
|
||
|
});
|
||
|
Object.defineProperty(Duplex.prototype, 'writableBuffer', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._writableState && this._writableState.getBuffer();
|
||
|
}
|
||
|
});
|
||
|
Object.defineProperty(Duplex.prototype, 'writableLength', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._writableState.length;
|
||
|
}
|
||
|
}); // the no-half-open enforcer
|
||
|
|
||
|
function onend() {
|
||
|
// If the writable side ended, then we're ok.
|
||
|
if (this._writableState.ended) return; // no more data can be written.
|
||
|
// But allow more writes to happen in this tick.
|
||
|
|
||
|
process.nextTick(onEndNT, this);
|
||
|
}
|
||
|
|
||
|
function onEndNT(self) {
|
||
|
self.end();
|
||
|
}
|
||
|
|
||
|
Object.defineProperty(Duplex.prototype, 'destroyed', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
if (this._readableState === undefined || this._writableState === undefined) {
|
||
|
return false;
|
||
|
}
|
||
|
|
||
|
return this._readableState.destroyed && this._writableState.destroyed;
|
||
|
},
|
||
|
set: function set(value) {
|
||
|
// we ignore the value if the stream
|
||
|
// has not been initialized yet
|
||
|
if (this._readableState === undefined || this._writableState === undefined) {
|
||
|
return;
|
||
|
} // backward compatibility, the user is explicitly
|
||
|
// managing destroyed
|
||
|
|
||
|
|
||
|
this._readableState.destroyed = value;
|
||
|
this._writableState.destroyed = value;
|
||
|
}
|
||
|
});
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 82725:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
// a passthrough stream.
|
||
|
// basically just the most minimal sort of Transform stream.
|
||
|
// Every written chunk gets output as-is.
|
||
|
|
||
|
|
||
|
module.exports = PassThrough;
|
||
|
|
||
|
var Transform = __webpack_require__(74605);
|
||
|
|
||
|
__webpack_require__(94378)(PassThrough, Transform);
|
||
|
|
||
|
function PassThrough(options) {
|
||
|
if (!(this instanceof PassThrough)) return new PassThrough(options);
|
||
|
Transform.call(this, options);
|
||
|
}
|
||
|
|
||
|
PassThrough.prototype._transform = function (chunk, encoding, cb) {
|
||
|
cb(null, chunk);
|
||
|
};
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 79481:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
|
||
|
|
||
|
module.exports = Readable;
|
||
|
/*<replacement>*/
|
||
|
|
||
|
var Duplex;
|
||
|
/*</replacement>*/
|
||
|
|
||
|
Readable.ReadableState = ReadableState;
|
||
|
/*<replacement>*/
|
||
|
|
||
|
var EE = __webpack_require__(28614).EventEmitter;
|
||
|
|
||
|
var EElistenerCount = function EElistenerCount(emitter, type) {
|
||
|
return emitter.listeners(type).length;
|
||
|
};
|
||
|
/*</replacement>*/
|
||
|
|
||
|
/*<replacement>*/
|
||
|
|
||
|
|
||
|
var Stream = __webpack_require__(79740);
|
||
|
/*</replacement>*/
|
||
|
|
||
|
|
||
|
var Buffer = __webpack_require__(64293).Buffer;
|
||
|
|
||
|
var OurUint8Array = global.Uint8Array || function () {};
|
||
|
|
||
|
function _uint8ArrayToBuffer(chunk) {
|
||
|
return Buffer.from(chunk);
|
||
|
}
|
||
|
|
||
|
function _isUint8Array(obj) {
|
||
|
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
|
||
|
}
|
||
|
/*<replacement>*/
|
||
|
|
||
|
|
||
|
var debugUtil = __webpack_require__(31669);
|
||
|
|
||
|
var debug;
|
||
|
|
||
|
if (debugUtil && debugUtil.debuglog) {
|
||
|
debug = debugUtil.debuglog('stream');
|
||
|
} else {
|
||
|
debug = function debug() {};
|
||
|
}
|
||
|
/*</replacement>*/
|
||
|
|
||
|
|
||
|
var BufferList = __webpack_require__(57327);
|
||
|
|
||
|
var destroyImpl = __webpack_require__(61195);
|
||
|
|
||
|
var _require = __webpack_require__(82457),
|
||
|
getHighWaterMark = _require.getHighWaterMark;
|
||
|
|
||
|
var _require$codes = __webpack_require__(4012)/* .codes */ .q,
|
||
|
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
|
||
|
ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF,
|
||
|
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
|
||
|
ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT; // Lazy loaded to improve the startup performance.
|
||
|
|
||
|
|
||
|
var StringDecoder;
|
||
|
var createReadableStreamAsyncIterator;
|
||
|
var from;
|
||
|
|
||
|
__webpack_require__(94378)(Readable, Stream);
|
||
|
|
||
|
var errorOrDestroy = destroyImpl.errorOrDestroy;
|
||
|
var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume'];
|
||
|
|
||
|
function prependListener(emitter, event, fn) {
|
||
|
// Sadly this is not cacheable as some libraries bundle their own
|
||
|
// event emitter implementation with them.
|
||
|
if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn); // This is a hack to make sure that our error handler is attached before any
|
||
|
// userland ones. NEVER DO THIS. This is here only because this code needs
|
||
|
// to continue to work with older versions of Node.js that do not include
|
||
|
// the prependListener() method. The goal is to eventually remove this hack.
|
||
|
|
||
|
if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]];
|
||
|
}
|
||
|
|
||
|
function ReadableState(options, stream, isDuplex) {
|
||
|
Duplex = Duplex || __webpack_require__(56753);
|
||
|
options = options || {}; // Duplex streams are both readable and writable, but share
|
||
|
// the same options object.
|
||
|
// However, some cases require setting options to different
|
||
|
// values for the readable and the writable sides of the duplex stream.
|
||
|
// These options can be provided separately as readableXXX and writableXXX.
|
||
|
|
||
|
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag. Used to make read(n) ignore n and to
|
||
|
// make all the buffer merging and length checks go away
|
||
|
|
||
|
this.objectMode = !!options.objectMode;
|
||
|
if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; // the point at which it stops calling _read() to fill the buffer
|
||
|
// Note: 0 is a valid value, means "don't call _read preemptively ever"
|
||
|
|
||
|
this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex); // A linked list is used to store data chunks instead of an array because the
|
||
|
// linked list can remove elements from the beginning faster than
|
||
|
// array.shift()
|
||
|
|
||
|
this.buffer = new BufferList();
|
||
|
this.length = 0;
|
||
|
this.pipes = null;
|
||
|
this.pipesCount = 0;
|
||
|
this.flowing = null;
|
||
|
this.ended = false;
|
||
|
this.endEmitted = false;
|
||
|
this.reading = false; // a flag to be able to tell if the event 'readable'/'data' is emitted
|
||
|
// immediately, or on a later tick. We set this to true at first, because
|
||
|
// any actions that shouldn't happen until "later" should generally also
|
||
|
// not happen before the first read call.
|
||
|
|
||
|
this.sync = true; // whenever we return null, then we set a flag to say
|
||
|
// that we're awaiting a 'readable' event emission.
|
||
|
|
||
|
this.needReadable = false;
|
||
|
this.emittedReadable = false;
|
||
|
this.readableListening = false;
|
||
|
this.resumeScheduled = false;
|
||
|
this.paused = true; // Should close be emitted on destroy. Defaults to true.
|
||
|
|
||
|
this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'end' (and potentially 'finish')
|
||
|
|
||
|
this.autoDestroy = !!options.autoDestroy; // has it been destroyed
|
||
|
|
||
|
this.destroyed = false; // Crypto is kind of old and crusty. Historically, its default string
|
||
|
// encoding is 'binary' so we have to make this configurable.
|
||
|
// Everything else in the universe uses 'utf8', though.
|
||
|
|
||
|
this.defaultEncoding = options.defaultEncoding || 'utf8'; // the number of writers that are awaiting a drain event in .pipe()s
|
||
|
|
||
|
this.awaitDrain = 0; // if true, a maybeReadMore has been scheduled
|
||
|
|
||
|
this.readingMore = false;
|
||
|
this.decoder = null;
|
||
|
this.encoding = null;
|
||
|
|
||
|
if (options.encoding) {
|
||
|
if (!StringDecoder) StringDecoder = __webpack_require__(32553)/* .StringDecoder */ .s;
|
||
|
this.decoder = new StringDecoder(options.encoding);
|
||
|
this.encoding = options.encoding;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function Readable(options) {
|
||
|
Duplex = Duplex || __webpack_require__(56753);
|
||
|
if (!(this instanceof Readable)) return new Readable(options); // Checking for a Stream.Duplex instance is faster here instead of inside
|
||
|
// the ReadableState constructor, at least with V8 6.5
|
||
|
|
||
|
var isDuplex = this instanceof Duplex;
|
||
|
this._readableState = new ReadableState(options, this, isDuplex); // legacy
|
||
|
|
||
|
this.readable = true;
|
||
|
|
||
|
if (options) {
|
||
|
if (typeof options.read === 'function') this._read = options.read;
|
||
|
if (typeof options.destroy === 'function') this._destroy = options.destroy;
|
||
|
}
|
||
|
|
||
|
Stream.call(this);
|
||
|
}
|
||
|
|
||
|
Object.defineProperty(Readable.prototype, 'destroyed', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
if (this._readableState === undefined) {
|
||
|
return false;
|
||
|
}
|
||
|
|
||
|
return this._readableState.destroyed;
|
||
|
},
|
||
|
set: function set(value) {
|
||
|
// we ignore the value if the stream
|
||
|
// has not been initialized yet
|
||
|
if (!this._readableState) {
|
||
|
return;
|
||
|
} // backward compatibility, the user is explicitly
|
||
|
// managing destroyed
|
||
|
|
||
|
|
||
|
this._readableState.destroyed = value;
|
||
|
}
|
||
|
});
|
||
|
Readable.prototype.destroy = destroyImpl.destroy;
|
||
|
Readable.prototype._undestroy = destroyImpl.undestroy;
|
||
|
|
||
|
Readable.prototype._destroy = function (err, cb) {
|
||
|
cb(err);
|
||
|
}; // Manually shove something into the read() buffer.
|
||
|
// This returns true if the highWaterMark has not been hit yet,
|
||
|
// similar to how Writable.write() returns true if you should
|
||
|
// write() some more.
|
||
|
|
||
|
|
||
|
Readable.prototype.push = function (chunk, encoding) {
|
||
|
var state = this._readableState;
|
||
|
var skipChunkCheck;
|
||
|
|
||
|
if (!state.objectMode) {
|
||
|
if (typeof chunk === 'string') {
|
||
|
encoding = encoding || state.defaultEncoding;
|
||
|
|
||
|
if (encoding !== state.encoding) {
|
||
|
chunk = Buffer.from(chunk, encoding);
|
||
|
encoding = '';
|
||
|
}
|
||
|
|
||
|
skipChunkCheck = true;
|
||
|
}
|
||
|
} else {
|
||
|
skipChunkCheck = true;
|
||
|
}
|
||
|
|
||
|
return readableAddChunk(this, chunk, encoding, false, skipChunkCheck);
|
||
|
}; // Unshift should *always* be something directly out of read()
|
||
|
|
||
|
|
||
|
Readable.prototype.unshift = function (chunk) {
|
||
|
return readableAddChunk(this, chunk, null, true, false);
|
||
|
};
|
||
|
|
||
|
function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) {
|
||
|
debug('readableAddChunk', chunk);
|
||
|
var state = stream._readableState;
|
||
|
|
||
|
if (chunk === null) {
|
||
|
state.reading = false;
|
||
|
onEofChunk(stream, state);
|
||
|
} else {
|
||
|
var er;
|
||
|
if (!skipChunkCheck) er = chunkInvalid(state, chunk);
|
||
|
|
||
|
if (er) {
|
||
|
errorOrDestroy(stream, er);
|
||
|
} else if (state.objectMode || chunk && chunk.length > 0) {
|
||
|
if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) {
|
||
|
chunk = _uint8ArrayToBuffer(chunk);
|
||
|
}
|
||
|
|
||
|
if (addToFront) {
|
||
|
if (state.endEmitted) errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true);
|
||
|
} else if (state.ended) {
|
||
|
errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF());
|
||
|
} else if (state.destroyed) {
|
||
|
return false;
|
||
|
} else {
|
||
|
state.reading = false;
|
||
|
|
||
|
if (state.decoder && !encoding) {
|
||
|
chunk = state.decoder.write(chunk);
|
||
|
if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state);
|
||
|
} else {
|
||
|
addChunk(stream, state, chunk, false);
|
||
|
}
|
||
|
}
|
||
|
} else if (!addToFront) {
|
||
|
state.reading = false;
|
||
|
maybeReadMore(stream, state);
|
||
|
}
|
||
|
} // We can push more data if we are below the highWaterMark.
|
||
|
// Also, if we have no data yet, we can stand some more bytes.
|
||
|
// This is to work around cases where hwm=0, such as the repl.
|
||
|
|
||
|
|
||
|
return !state.ended && (state.length < state.highWaterMark || state.length === 0);
|
||
|
}
|
||
|
|
||
|
function addChunk(stream, state, chunk, addToFront) {
|
||
|
if (state.flowing && state.length === 0 && !state.sync) {
|
||
|
state.awaitDrain = 0;
|
||
|
stream.emit('data', chunk);
|
||
|
} else {
|
||
|
// update the buffer info.
|
||
|
state.length += state.objectMode ? 1 : chunk.length;
|
||
|
if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk);
|
||
|
if (state.needReadable) emitReadable(stream);
|
||
|
}
|
||
|
|
||
|
maybeReadMore(stream, state);
|
||
|
}
|
||
|
|
||
|
function chunkInvalid(state, chunk) {
|
||
|
var er;
|
||
|
|
||
|
if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
|
||
|
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk);
|
||
|
}
|
||
|
|
||
|
return er;
|
||
|
}
|
||
|
|
||
|
Readable.prototype.isPaused = function () {
|
||
|
return this._readableState.flowing === false;
|
||
|
}; // backwards compatibility.
|
||
|
|
||
|
|
||
|
Readable.prototype.setEncoding = function (enc) {
|
||
|
if (!StringDecoder) StringDecoder = __webpack_require__(32553)/* .StringDecoder */ .s;
|
||
|
var decoder = new StringDecoder(enc);
|
||
|
this._readableState.decoder = decoder; // If setEncoding(null), decoder.encoding equals utf8
|
||
|
|
||
|
this._readableState.encoding = this._readableState.decoder.encoding; // Iterate over current buffer to convert already stored Buffers:
|
||
|
|
||
|
var p = this._readableState.buffer.head;
|
||
|
var content = '';
|
||
|
|
||
|
while (p !== null) {
|
||
|
content += decoder.write(p.data);
|
||
|
p = p.next;
|
||
|
}
|
||
|
|
||
|
this._readableState.buffer.clear();
|
||
|
|
||
|
if (content !== '') this._readableState.buffer.push(content);
|
||
|
this._readableState.length = content.length;
|
||
|
return this;
|
||
|
}; // Don't raise the hwm > 1GB
|
||
|
|
||
|
|
||
|
var MAX_HWM = 0x40000000;
|
||
|
|
||
|
function computeNewHighWaterMark(n) {
|
||
|
if (n >= MAX_HWM) {
|
||
|
// TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE.
|
||
|
n = MAX_HWM;
|
||
|
} else {
|
||
|
// Get the next highest power of 2 to prevent increasing hwm excessively in
|
||
|
// tiny amounts
|
||
|
n--;
|
||
|
n |= n >>> 1;
|
||
|
n |= n >>> 2;
|
||
|
n |= n >>> 4;
|
||
|
n |= n >>> 8;
|
||
|
n |= n >>> 16;
|
||
|
n++;
|
||
|
}
|
||
|
|
||
|
return n;
|
||
|
} // This function is designed to be inlinable, so please take care when making
|
||
|
// changes to the function body.
|
||
|
|
||
|
|
||
|
function howMuchToRead(n, state) {
|
||
|
if (n <= 0 || state.length === 0 && state.ended) return 0;
|
||
|
if (state.objectMode) return 1;
|
||
|
|
||
|
if (n !== n) {
|
||
|
// Only flow one buffer at a time
|
||
|
if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length;
|
||
|
} // If we're asking for more than the current hwm, then raise the hwm.
|
||
|
|
||
|
|
||
|
if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n);
|
||
|
if (n <= state.length) return n; // Don't have enough
|
||
|
|
||
|
if (!state.ended) {
|
||
|
state.needReadable = true;
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
return state.length;
|
||
|
} // you can override either this method, or the async _read(n) below.
|
||
|
|
||
|
|
||
|
Readable.prototype.read = function (n) {
|
||
|
debug('read', n);
|
||
|
n = parseInt(n, 10);
|
||
|
var state = this._readableState;
|
||
|
var nOrig = n;
|
||
|
if (n !== 0) state.emittedReadable = false; // if we're doing read(0) to trigger a readable event, but we
|
||
|
// already have a bunch of data in the buffer, then just trigger
|
||
|
// the 'readable' event and move on.
|
||
|
|
||
|
if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) {
|
||
|
debug('read: emitReadable', state.length, state.ended);
|
||
|
if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this);
|
||
|
return null;
|
||
|
}
|
||
|
|
||
|
n = howMuchToRead(n, state); // if we've ended, and we're now clear, then finish it up.
|
||
|
|
||
|
if (n === 0 && state.ended) {
|
||
|
if (state.length === 0) endReadable(this);
|
||
|
return null;
|
||
|
} // All the actual chunk generation logic needs to be
|
||
|
// *below* the call to _read. The reason is that in certain
|
||
|
// synthetic stream cases, such as passthrough streams, _read
|
||
|
// may be a completely synchronous operation which may change
|
||
|
// the state of the read buffer, providing enough data when
|
||
|
// before there was *not* enough.
|
||
|
//
|
||
|
// So, the steps are:
|
||
|
// 1. Figure out what the state of things will be after we do
|
||
|
// a read from the buffer.
|
||
|
//
|
||
|
// 2. If that resulting state will trigger a _read, then call _read.
|
||
|
// Note that this may be asynchronous, or synchronous. Yes, it is
|
||
|
// deeply ugly to write APIs this way, but that still doesn't mean
|
||
|
// that the Readable class should behave improperly, as streams are
|
||
|
// designed to be sync/async agnostic.
|
||
|
// Take note if the _read call is sync or async (ie, if the read call
|
||
|
// has returned yet), so that we know whether or not it's safe to emit
|
||
|
// 'readable' etc.
|
||
|
//
|
||
|
// 3. Actually pull the requested chunks out of the buffer and return.
|
||
|
// if we need a readable event, then we need to do some reading.
|
||
|
|
||
|
|
||
|
var doRead = state.needReadable;
|
||
|
debug('need readable', doRead); // if we currently have less than the highWaterMark, then also read some
|
||
|
|
||
|
if (state.length === 0 || state.length - n < state.highWaterMark) {
|
||
|
doRead = true;
|
||
|
debug('length less than watermark', doRead);
|
||
|
} // however, if we've ended, then there's no point, and if we're already
|
||
|
// reading, then it's unnecessary.
|
||
|
|
||
|
|
||
|
if (state.ended || state.reading) {
|
||
|
doRead = false;
|
||
|
debug('reading or ended', doRead);
|
||
|
} else if (doRead) {
|
||
|
debug('do read');
|
||
|
state.reading = true;
|
||
|
state.sync = true; // if the length is currently zero, then we *need* a readable event.
|
||
|
|
||
|
if (state.length === 0) state.needReadable = true; // call internal read method
|
||
|
|
||
|
this._read(state.highWaterMark);
|
||
|
|
||
|
state.sync = false; // If _read pushed data synchronously, then `reading` will be false,
|
||
|
// and we need to re-evaluate how much data we can return to the user.
|
||
|
|
||
|
if (!state.reading) n = howMuchToRead(nOrig, state);
|
||
|
}
|
||
|
|
||
|
var ret;
|
||
|
if (n > 0) ret = fromList(n, state);else ret = null;
|
||
|
|
||
|
if (ret === null) {
|
||
|
state.needReadable = state.length <= state.highWaterMark;
|
||
|
n = 0;
|
||
|
} else {
|
||
|
state.length -= n;
|
||
|
state.awaitDrain = 0;
|
||
|
}
|
||
|
|
||
|
if (state.length === 0) {
|
||
|
// If we have nothing in the buffer, then we want to know
|
||
|
// as soon as we *do* get something into the buffer.
|
||
|
if (!state.ended) state.needReadable = true; // If we tried to read() past the EOF, then emit end on the next tick.
|
||
|
|
||
|
if (nOrig !== n && state.ended) endReadable(this);
|
||
|
}
|
||
|
|
||
|
if (ret !== null) this.emit('data', ret);
|
||
|
return ret;
|
||
|
};
|
||
|
|
||
|
function onEofChunk(stream, state) {
|
||
|
debug('onEofChunk');
|
||
|
if (state.ended) return;
|
||
|
|
||
|
if (state.decoder) {
|
||
|
var chunk = state.decoder.end();
|
||
|
|
||
|
if (chunk && chunk.length) {
|
||
|
state.buffer.push(chunk);
|
||
|
state.length += state.objectMode ? 1 : chunk.length;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
state.ended = true;
|
||
|
|
||
|
if (state.sync) {
|
||
|
// if we are sync, wait until next tick to emit the data.
|
||
|
// Otherwise we risk emitting data in the flow()
|
||
|
// the readable code triggers during a read() call
|
||
|
emitReadable(stream);
|
||
|
} else {
|
||
|
// emit 'readable' now to make sure it gets picked up.
|
||
|
state.needReadable = false;
|
||
|
|
||
|
if (!state.emittedReadable) {
|
||
|
state.emittedReadable = true;
|
||
|
emitReadable_(stream);
|
||
|
}
|
||
|
}
|
||
|
} // Don't emit readable right away in sync mode, because this can trigger
|
||
|
// another read() call => stack overflow. This way, it might trigger
|
||
|
// a nextTick recursion warning, but that's not so bad.
|
||
|
|
||
|
|
||
|
function emitReadable(stream) {
|
||
|
var state = stream._readableState;
|
||
|
debug('emitReadable', state.needReadable, state.emittedReadable);
|
||
|
state.needReadable = false;
|
||
|
|
||
|
if (!state.emittedReadable) {
|
||
|
debug('emitReadable', state.flowing);
|
||
|
state.emittedReadable = true;
|
||
|
process.nextTick(emitReadable_, stream);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function emitReadable_(stream) {
|
||
|
var state = stream._readableState;
|
||
|
debug('emitReadable_', state.destroyed, state.length, state.ended);
|
||
|
|
||
|
if (!state.destroyed && (state.length || state.ended)) {
|
||
|
stream.emit('readable');
|
||
|
state.emittedReadable = false;
|
||
|
} // The stream needs another readable event if
|
||
|
// 1. It is not flowing, as the flow mechanism will take
|
||
|
// care of it.
|
||
|
// 2. It is not ended.
|
||
|
// 3. It is below the highWaterMark, so we can schedule
|
||
|
// another readable later.
|
||
|
|
||
|
|
||
|
state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark;
|
||
|
flow(stream);
|
||
|
} // at this point, the user has presumably seen the 'readable' event,
|
||
|
// and called read() to consume some data. that may have triggered
|
||
|
// in turn another _read(n) call, in which case reading = true if
|
||
|
// it's in progress.
|
||
|
// However, if we're not ended, or reading, and the length < hwm,
|
||
|
// then go ahead and try to read some more preemptively.
|
||
|
|
||
|
|
||
|
function maybeReadMore(stream, state) {
|
||
|
if (!state.readingMore) {
|
||
|
state.readingMore = true;
|
||
|
process.nextTick(maybeReadMore_, stream, state);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function maybeReadMore_(stream, state) {
|
||
|
// Attempt to read more data if we should.
|
||
|
//
|
||
|
// The conditions for reading more data are (one of):
|
||
|
// - Not enough data buffered (state.length < state.highWaterMark). The loop
|
||
|
// is responsible for filling the buffer with enough data if such data
|
||
|
// is available. If highWaterMark is 0 and we are not in the flowing mode
|
||
|
// we should _not_ attempt to buffer any extra data. We'll get more data
|
||
|
// when the stream consumer calls read() instead.
|
||
|
// - No data in the buffer, and the stream is in flowing mode. In this mode
|
||
|
// the loop below is responsible for ensuring read() is called. Failing to
|
||
|
// call read here would abort the flow and there's no other mechanism for
|
||
|
// continuing the flow if the stream consumer has just subscribed to the
|
||
|
// 'data' event.
|
||
|
//
|
||
|
// In addition to the above conditions to keep reading data, the following
|
||
|
// conditions prevent the data from being read:
|
||
|
// - The stream has ended (state.ended).
|
||
|
// - There is already a pending 'read' operation (state.reading). This is a
|
||
|
// case where the the stream has called the implementation defined _read()
|
||
|
// method, but they are processing the call asynchronously and have _not_
|
||
|
// called push() with new data. In this case we skip performing more
|
||
|
// read()s. The execution ends in this method again after the _read() ends
|
||
|
// up calling push() with more data.
|
||
|
while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) {
|
||
|
var len = state.length;
|
||
|
debug('maybeReadMore read 0');
|
||
|
stream.read(0);
|
||
|
if (len === state.length) // didn't get any data, stop spinning.
|
||
|
break;
|
||
|
}
|
||
|
|
||
|
state.readingMore = false;
|
||
|
} // abstract method. to be overridden in specific implementation classes.
|
||
|
// call cb(er, data) where data is <= n in length.
|
||
|
// for virtual (non-string, non-buffer) streams, "length" is somewhat
|
||
|
// arbitrary, and perhaps not very meaningful.
|
||
|
|
||
|
|
||
|
Readable.prototype._read = function (n) {
|
||
|
errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()'));
|
||
|
};
|
||
|
|
||
|
Readable.prototype.pipe = function (dest, pipeOpts) {
|
||
|
var src = this;
|
||
|
var state = this._readableState;
|
||
|
|
||
|
switch (state.pipesCount) {
|
||
|
case 0:
|
||
|
state.pipes = dest;
|
||
|
break;
|
||
|
|
||
|
case 1:
|
||
|
state.pipes = [state.pipes, dest];
|
||
|
break;
|
||
|
|
||
|
default:
|
||
|
state.pipes.push(dest);
|
||
|
break;
|
||
|
}
|
||
|
|
||
|
state.pipesCount += 1;
|
||
|
debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);
|
||
|
var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr;
|
||
|
var endFn = doEnd ? onend : unpipe;
|
||
|
if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn);
|
||
|
dest.on('unpipe', onunpipe);
|
||
|
|
||
|
function onunpipe(readable, unpipeInfo) {
|
||
|
debug('onunpipe');
|
||
|
|
||
|
if (readable === src) {
|
||
|
if (unpipeInfo && unpipeInfo.hasUnpiped === false) {
|
||
|
unpipeInfo.hasUnpiped = true;
|
||
|
cleanup();
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function onend() {
|
||
|
debug('onend');
|
||
|
dest.end();
|
||
|
} // when the dest drains, it reduces the awaitDrain counter
|
||
|
// on the source. This would be more elegant with a .once()
|
||
|
// handler in flow(), but adding and removing repeatedly is
|
||
|
// too slow.
|
||
|
|
||
|
|
||
|
var ondrain = pipeOnDrain(src);
|
||
|
dest.on('drain', ondrain);
|
||
|
var cleanedUp = false;
|
||
|
|
||
|
function cleanup() {
|
||
|
debug('cleanup'); // cleanup event handlers once the pipe is broken
|
||
|
|
||
|
dest.removeListener('close', onclose);
|
||
|
dest.removeListener('finish', onfinish);
|
||
|
dest.removeListener('drain', ondrain);
|
||
|
dest.removeListener('error', onerror);
|
||
|
dest.removeListener('unpipe', onunpipe);
|
||
|
src.removeListener('end', onend);
|
||
|
src.removeListener('end', unpipe);
|
||
|
src.removeListener('data', ondata);
|
||
|
cleanedUp = true; // if the reader is waiting for a drain event from this
|
||
|
// specific writer, then it would cause it to never start
|
||
|
// flowing again.
|
||
|
// So, if this is awaiting a drain, then we just call it now.
|
||
|
// If we don't know, then assume that we are waiting for one.
|
||
|
|
||
|
if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain();
|
||
|
}
|
||
|
|
||
|
src.on('data', ondata);
|
||
|
|
||
|
function ondata(chunk) {
|
||
|
debug('ondata');
|
||
|
var ret = dest.write(chunk);
|
||
|
debug('dest.write', ret);
|
||
|
|
||
|
if (ret === false) {
|
||
|
// If the user unpiped during `dest.write()`, it is possible
|
||
|
// to get stuck in a permanently paused state if that write
|
||
|
// also returned false.
|
||
|
// => Check whether `dest` is still a piping destination.
|
||
|
if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) {
|
||
|
debug('false write response, pause', state.awaitDrain);
|
||
|
state.awaitDrain++;
|
||
|
}
|
||
|
|
||
|
src.pause();
|
||
|
}
|
||
|
} // if the dest has an error, then stop piping into it.
|
||
|
// however, don't suppress the throwing behavior for this.
|
||
|
|
||
|
|
||
|
function onerror(er) {
|
||
|
debug('onerror', er);
|
||
|
unpipe();
|
||
|
dest.removeListener('error', onerror);
|
||
|
if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er);
|
||
|
} // Make sure our error handler is attached before userland ones.
|
||
|
|
||
|
|
||
|
prependListener(dest, 'error', onerror); // Both close and finish should trigger unpipe, but only once.
|
||
|
|
||
|
function onclose() {
|
||
|
dest.removeListener('finish', onfinish);
|
||
|
unpipe();
|
||
|
}
|
||
|
|
||
|
dest.once('close', onclose);
|
||
|
|
||
|
function onfinish() {
|
||
|
debug('onfinish');
|
||
|
dest.removeListener('close', onclose);
|
||
|
unpipe();
|
||
|
}
|
||
|
|
||
|
dest.once('finish', onfinish);
|
||
|
|
||
|
function unpipe() {
|
||
|
debug('unpipe');
|
||
|
src.unpipe(dest);
|
||
|
} // tell the dest that it's being piped to
|
||
|
|
||
|
|
||
|
dest.emit('pipe', src); // start the flow if it hasn't been started already.
|
||
|
|
||
|
if (!state.flowing) {
|
||
|
debug('pipe resume');
|
||
|
src.resume();
|
||
|
}
|
||
|
|
||
|
return dest;
|
||
|
};
|
||
|
|
||
|
function pipeOnDrain(src) {
|
||
|
return function pipeOnDrainFunctionResult() {
|
||
|
var state = src._readableState;
|
||
|
debug('pipeOnDrain', state.awaitDrain);
|
||
|
if (state.awaitDrain) state.awaitDrain--;
|
||
|
|
||
|
if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) {
|
||
|
state.flowing = true;
|
||
|
flow(src);
|
||
|
}
|
||
|
};
|
||
|
}
|
||
|
|
||
|
Readable.prototype.unpipe = function (dest) {
|
||
|
var state = this._readableState;
|
||
|
var unpipeInfo = {
|
||
|
hasUnpiped: false
|
||
|
}; // if we're not piping anywhere, then do nothing.
|
||
|
|
||
|
if (state.pipesCount === 0) return this; // just one destination. most common case.
|
||
|
|
||
|
if (state.pipesCount === 1) {
|
||
|
// passed in one, but it's not the right one.
|
||
|
if (dest && dest !== state.pipes) return this;
|
||
|
if (!dest) dest = state.pipes; // got a match.
|
||
|
|
||
|
state.pipes = null;
|
||
|
state.pipesCount = 0;
|
||
|
state.flowing = false;
|
||
|
if (dest) dest.emit('unpipe', this, unpipeInfo);
|
||
|
return this;
|
||
|
} // slow case. multiple pipe destinations.
|
||
|
|
||
|
|
||
|
if (!dest) {
|
||
|
// remove all.
|
||
|
var dests = state.pipes;
|
||
|
var len = state.pipesCount;
|
||
|
state.pipes = null;
|
||
|
state.pipesCount = 0;
|
||
|
state.flowing = false;
|
||
|
|
||
|
for (var i = 0; i < len; i++) {
|
||
|
dests[i].emit('unpipe', this, {
|
||
|
hasUnpiped: false
|
||
|
});
|
||
|
}
|
||
|
|
||
|
return this;
|
||
|
} // try to find the right one.
|
||
|
|
||
|
|
||
|
var index = indexOf(state.pipes, dest);
|
||
|
if (index === -1) return this;
|
||
|
state.pipes.splice(index, 1);
|
||
|
state.pipesCount -= 1;
|
||
|
if (state.pipesCount === 1) state.pipes = state.pipes[0];
|
||
|
dest.emit('unpipe', this, unpipeInfo);
|
||
|
return this;
|
||
|
}; // set up data events if they are asked for
|
||
|
// Ensure readable listeners eventually get something
|
||
|
|
||
|
|
||
|
Readable.prototype.on = function (ev, fn) {
|
||
|
var res = Stream.prototype.on.call(this, ev, fn);
|
||
|
var state = this._readableState;
|
||
|
|
||
|
if (ev === 'data') {
|
||
|
// update readableListening so that resume() may be a no-op
|
||
|
// a few lines down. This is needed to support once('readable').
|
||
|
state.readableListening = this.listenerCount('readable') > 0; // Try start flowing on next tick if stream isn't explicitly paused
|
||
|
|
||
|
if (state.flowing !== false) this.resume();
|
||
|
} else if (ev === 'readable') {
|
||
|
if (!state.endEmitted && !state.readableListening) {
|
||
|
state.readableListening = state.needReadable = true;
|
||
|
state.flowing = false;
|
||
|
state.emittedReadable = false;
|
||
|
debug('on readable', state.length, state.reading);
|
||
|
|
||
|
if (state.length) {
|
||
|
emitReadable(this);
|
||
|
} else if (!state.reading) {
|
||
|
process.nextTick(nReadingNextTick, this);
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
return res;
|
||
|
};
|
||
|
|
||
|
Readable.prototype.addListener = Readable.prototype.on;
|
||
|
|
||
|
Readable.prototype.removeListener = function (ev, fn) {
|
||
|
var res = Stream.prototype.removeListener.call(this, ev, fn);
|
||
|
|
||
|
if (ev === 'readable') {
|
||
|
// We need to check if there is someone still listening to
|
||
|
// readable and reset the state. However this needs to happen
|
||
|
// after readable has been emitted but before I/O (nextTick) to
|
||
|
// support once('readable', fn) cycles. This means that calling
|
||
|
// resume within the same tick will have no
|
||
|
// effect.
|
||
|
process.nextTick(updateReadableListening, this);
|
||
|
}
|
||
|
|
||
|
return res;
|
||
|
};
|
||
|
|
||
|
Readable.prototype.removeAllListeners = function (ev) {
|
||
|
var res = Stream.prototype.removeAllListeners.apply(this, arguments);
|
||
|
|
||
|
if (ev === 'readable' || ev === undefined) {
|
||
|
// We need to check if there is someone still listening to
|
||
|
// readable and reset the state. However this needs to happen
|
||
|
// after readable has been emitted but before I/O (nextTick) to
|
||
|
// support once('readable', fn) cycles. This means that calling
|
||
|
// resume within the same tick will have no
|
||
|
// effect.
|
||
|
process.nextTick(updateReadableListening, this);
|
||
|
}
|
||
|
|
||
|
return res;
|
||
|
};
|
||
|
|
||
|
function updateReadableListening(self) {
|
||
|
var state = self._readableState;
|
||
|
state.readableListening = self.listenerCount('readable') > 0;
|
||
|
|
||
|
if (state.resumeScheduled && !state.paused) {
|
||
|
// flowing needs to be set to true now, otherwise
|
||
|
// the upcoming resume will not flow.
|
||
|
state.flowing = true; // crude way to check if we should resume
|
||
|
} else if (self.listenerCount('data') > 0) {
|
||
|
self.resume();
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function nReadingNextTick(self) {
|
||
|
debug('readable nexttick read 0');
|
||
|
self.read(0);
|
||
|
} // pause() and resume() are remnants of the legacy readable stream API
|
||
|
// If the user uses them, then switch into old mode.
|
||
|
|
||
|
|
||
|
Readable.prototype.resume = function () {
|
||
|
var state = this._readableState;
|
||
|
|
||
|
if (!state.flowing) {
|
||
|
debug('resume'); // we flow only if there is no one listening
|
||
|
// for readable, but we still have to call
|
||
|
// resume()
|
||
|
|
||
|
state.flowing = !state.readableListening;
|
||
|
resume(this, state);
|
||
|
}
|
||
|
|
||
|
state.paused = false;
|
||
|
return this;
|
||
|
};
|
||
|
|
||
|
function resume(stream, state) {
|
||
|
if (!state.resumeScheduled) {
|
||
|
state.resumeScheduled = true;
|
||
|
process.nextTick(resume_, stream, state);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function resume_(stream, state) {
|
||
|
debug('resume', state.reading);
|
||
|
|
||
|
if (!state.reading) {
|
||
|
stream.read(0);
|
||
|
}
|
||
|
|
||
|
state.resumeScheduled = false;
|
||
|
stream.emit('resume');
|
||
|
flow(stream);
|
||
|
if (state.flowing && !state.reading) stream.read(0);
|
||
|
}
|
||
|
|
||
|
Readable.prototype.pause = function () {
|
||
|
debug('call pause flowing=%j', this._readableState.flowing);
|
||
|
|
||
|
if (this._readableState.flowing !== false) {
|
||
|
debug('pause');
|
||
|
this._readableState.flowing = false;
|
||
|
this.emit('pause');
|
||
|
}
|
||
|
|
||
|
this._readableState.paused = true;
|
||
|
return this;
|
||
|
};
|
||
|
|
||
|
function flow(stream) {
|
||
|
var state = stream._readableState;
|
||
|
debug('flow', state.flowing);
|
||
|
|
||
|
while (state.flowing && stream.read() !== null) {
|
||
|
;
|
||
|
}
|
||
|
} // wrap an old-style stream as the async data source.
|
||
|
// This is *not* part of the readable stream interface.
|
||
|
// It is an ugly unfortunate mess of history.
|
||
|
|
||
|
|
||
|
Readable.prototype.wrap = function (stream) {
|
||
|
var _this = this;
|
||
|
|
||
|
var state = this._readableState;
|
||
|
var paused = false;
|
||
|
stream.on('end', function () {
|
||
|
debug('wrapped end');
|
||
|
|
||
|
if (state.decoder && !state.ended) {
|
||
|
var chunk = state.decoder.end();
|
||
|
if (chunk && chunk.length) _this.push(chunk);
|
||
|
}
|
||
|
|
||
|
_this.push(null);
|
||
|
});
|
||
|
stream.on('data', function (chunk) {
|
||
|
debug('wrapped data');
|
||
|
if (state.decoder) chunk = state.decoder.write(chunk); // don't skip over falsy values in objectMode
|
||
|
|
||
|
if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return;
|
||
|
|
||
|
var ret = _this.push(chunk);
|
||
|
|
||
|
if (!ret) {
|
||
|
paused = true;
|
||
|
stream.pause();
|
||
|
}
|
||
|
}); // proxy all the other methods.
|
||
|
// important when wrapping filters and duplexes.
|
||
|
|
||
|
for (var i in stream) {
|
||
|
if (this[i] === undefined && typeof stream[i] === 'function') {
|
||
|
this[i] = function methodWrap(method) {
|
||
|
return function methodWrapReturnFunction() {
|
||
|
return stream[method].apply(stream, arguments);
|
||
|
};
|
||
|
}(i);
|
||
|
}
|
||
|
} // proxy certain important events.
|
||
|
|
||
|
|
||
|
for (var n = 0; n < kProxyEvents.length; n++) {
|
||
|
stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n]));
|
||
|
} // when we try to consume some more bytes, simply unpause the
|
||
|
// underlying stream.
|
||
|
|
||
|
|
||
|
this._read = function (n) {
|
||
|
debug('wrapped _read', n);
|
||
|
|
||
|
if (paused) {
|
||
|
paused = false;
|
||
|
stream.resume();
|
||
|
}
|
||
|
};
|
||
|
|
||
|
return this;
|
||
|
};
|
||
|
|
||
|
if (typeof Symbol === 'function') {
|
||
|
Readable.prototype[Symbol.asyncIterator] = function () {
|
||
|
if (createReadableStreamAsyncIterator === undefined) {
|
||
|
createReadableStreamAsyncIterator = __webpack_require__(45850);
|
||
|
}
|
||
|
|
||
|
return createReadableStreamAsyncIterator(this);
|
||
|
};
|
||
|
}
|
||
|
|
||
|
Object.defineProperty(Readable.prototype, 'readableHighWaterMark', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._readableState.highWaterMark;
|
||
|
}
|
||
|
});
|
||
|
Object.defineProperty(Readable.prototype, 'readableBuffer', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._readableState && this._readableState.buffer;
|
||
|
}
|
||
|
});
|
||
|
Object.defineProperty(Readable.prototype, 'readableFlowing', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._readableState.flowing;
|
||
|
},
|
||
|
set: function set(state) {
|
||
|
if (this._readableState) {
|
||
|
this._readableState.flowing = state;
|
||
|
}
|
||
|
}
|
||
|
}); // exposed for testing purposes only.
|
||
|
|
||
|
Readable._fromList = fromList;
|
||
|
Object.defineProperty(Readable.prototype, 'readableLength', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._readableState.length;
|
||
|
}
|
||
|
}); // Pluck off n bytes from an array of buffers.
|
||
|
// Length is the combined lengths of all the buffers in the list.
|
||
|
// This function is designed to be inlinable, so please take care when making
|
||
|
// changes to the function body.
|
||
|
|
||
|
function fromList(n, state) {
|
||
|
// nothing buffered
|
||
|
if (state.length === 0) return null;
|
||
|
var ret;
|
||
|
if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) {
|
||
|
// read it all, truncate the list
|
||
|
if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length);
|
||
|
state.buffer.clear();
|
||
|
} else {
|
||
|
// read part of list
|
||
|
ret = state.buffer.consume(n, state.decoder);
|
||
|
}
|
||
|
return ret;
|
||
|
}
|
||
|
|
||
|
function endReadable(stream) {
|
||
|
var state = stream._readableState;
|
||
|
debug('endReadable', state.endEmitted);
|
||
|
|
||
|
if (!state.endEmitted) {
|
||
|
state.ended = true;
|
||
|
process.nextTick(endReadableNT, state, stream);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function endReadableNT(state, stream) {
|
||
|
debug('endReadableNT', state.endEmitted, state.length); // Check that we didn't get one last unshift.
|
||
|
|
||
|
if (!state.endEmitted && state.length === 0) {
|
||
|
state.endEmitted = true;
|
||
|
stream.readable = false;
|
||
|
stream.emit('end');
|
||
|
|
||
|
if (state.autoDestroy) {
|
||
|
// In case of duplex streams we need a way to detect
|
||
|
// if the writable side is ready for autoDestroy as well
|
||
|
var wState = stream._writableState;
|
||
|
|
||
|
if (!wState || wState.autoDestroy && wState.finished) {
|
||
|
stream.destroy();
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
if (typeof Symbol === 'function') {
|
||
|
Readable.from = function (iterable, opts) {
|
||
|
if (from === undefined) {
|
||
|
from = __webpack_require__(96307);
|
||
|
}
|
||
|
|
||
|
return from(Readable, iterable, opts);
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function indexOf(xs, x) {
|
||
|
for (var i = 0, l = xs.length; i < l; i++) {
|
||
|
if (xs[i] === x) return i;
|
||
|
}
|
||
|
|
||
|
return -1;
|
||
|
}
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 74605:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
// a transform stream is a readable/writable stream where you do
|
||
|
// something with the data. Sometimes it's called a "filter",
|
||
|
// but that's not a great name for it, since that implies a thing where
|
||
|
// some bits pass through, and others are simply ignored. (That would
|
||
|
// be a valid example of a transform, of course.)
|
||
|
//
|
||
|
// While the output is causally related to the input, it's not a
|
||
|
// necessarily symmetric or synchronous transformation. For example,
|
||
|
// a zlib stream might take multiple plain-text writes(), and then
|
||
|
// emit a single compressed chunk some time in the future.
|
||
|
//
|
||
|
// Here's how this works:
|
||
|
//
|
||
|
// The Transform stream has all the aspects of the readable and writable
|
||
|
// stream classes. When you write(chunk), that calls _write(chunk,cb)
|
||
|
// internally, and returns false if there's a lot of pending writes
|
||
|
// buffered up. When you call read(), that calls _read(n) until
|
||
|
// there's enough pending readable data buffered up.
|
||
|
//
|
||
|
// In a transform stream, the written data is placed in a buffer. When
|
||
|
// _read(n) is called, it transforms the queued up data, calling the
|
||
|
// buffered _write cb's as it consumes chunks. If consuming a single
|
||
|
// written chunk would result in multiple output chunks, then the first
|
||
|
// outputted bit calls the readcb, and subsequent chunks just go into
|
||
|
// the read buffer, and will cause it to emit 'readable' if necessary.
|
||
|
//
|
||
|
// This way, back-pressure is actually determined by the reading side,
|
||
|
// since _read has to be called to start processing a new chunk. However,
|
||
|
// a pathological inflate type of transform can cause excessive buffering
|
||
|
// here. For example, imagine a stream where every byte of input is
|
||
|
// interpreted as an integer from 0-255, and then results in that many
|
||
|
// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
|
||
|
// 1kb of data being output. In this case, you could write a very small
|
||
|
// amount of input, and end up with a very large amount of output. In
|
||
|
// such a pathological inflating mechanism, there'd be no way to tell
|
||
|
// the system to stop doing the transform. A single 4MB write could
|
||
|
// cause the system to run out of memory.
|
||
|
//
|
||
|
// However, even in such a pathological case, only a single written chunk
|
||
|
// would be consumed, and then the rest would wait (un-transformed) until
|
||
|
// the results of the previous transformed chunk were consumed.
|
||
|
|
||
|
|
||
|
module.exports = Transform;
|
||
|
|
||
|
var _require$codes = __webpack_require__(4012)/* .codes */ .q,
|
||
|
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
|
||
|
ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,
|
||
|
ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING,
|
||
|
ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0;
|
||
|
|
||
|
var Duplex = __webpack_require__(56753);
|
||
|
|
||
|
__webpack_require__(94378)(Transform, Duplex);
|
||
|
|
||
|
function afterTransform(er, data) {
|
||
|
var ts = this._transformState;
|
||
|
ts.transforming = false;
|
||
|
var cb = ts.writecb;
|
||
|
|
||
|
if (cb === null) {
|
||
|
return this.emit('error', new ERR_MULTIPLE_CALLBACK());
|
||
|
}
|
||
|
|
||
|
ts.writechunk = null;
|
||
|
ts.writecb = null;
|
||
|
if (data != null) // single equals check for both `null` and `undefined`
|
||
|
this.push(data);
|
||
|
cb(er);
|
||
|
var rs = this._readableState;
|
||
|
rs.reading = false;
|
||
|
|
||
|
if (rs.needReadable || rs.length < rs.highWaterMark) {
|
||
|
this._read(rs.highWaterMark);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function Transform(options) {
|
||
|
if (!(this instanceof Transform)) return new Transform(options);
|
||
|
Duplex.call(this, options);
|
||
|
this._transformState = {
|
||
|
afterTransform: afterTransform.bind(this),
|
||
|
needTransform: false,
|
||
|
transforming: false,
|
||
|
writecb: null,
|
||
|
writechunk: null,
|
||
|
writeencoding: null
|
||
|
}; // start out asking for a readable event once data is transformed.
|
||
|
|
||
|
this._readableState.needReadable = true; // we have implemented the _read method, and done the other things
|
||
|
// that Readable wants before the first _read call, so unset the
|
||
|
// sync guard flag.
|
||
|
|
||
|
this._readableState.sync = false;
|
||
|
|
||
|
if (options) {
|
||
|
if (typeof options.transform === 'function') this._transform = options.transform;
|
||
|
if (typeof options.flush === 'function') this._flush = options.flush;
|
||
|
} // When the writable side finishes, then flush out anything remaining.
|
||
|
|
||
|
|
||
|
this.on('prefinish', prefinish);
|
||
|
}
|
||
|
|
||
|
function prefinish() {
|
||
|
var _this = this;
|
||
|
|
||
|
if (typeof this._flush === 'function' && !this._readableState.destroyed) {
|
||
|
this._flush(function (er, data) {
|
||
|
done(_this, er, data);
|
||
|
});
|
||
|
} else {
|
||
|
done(this, null, null);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
Transform.prototype.push = function (chunk, encoding) {
|
||
|
this._transformState.needTransform = false;
|
||
|
return Duplex.prototype.push.call(this, chunk, encoding);
|
||
|
}; // This is the part where you do stuff!
|
||
|
// override this function in implementation classes.
|
||
|
// 'chunk' is an input chunk.
|
||
|
//
|
||
|
// Call `push(newChunk)` to pass along transformed output
|
||
|
// to the readable side. You may call 'push' zero or more times.
|
||
|
//
|
||
|
// Call `cb(err)` when you are done with this chunk. If you pass
|
||
|
// an error, then that'll put the hurt on the whole operation. If you
|
||
|
// never call cb(), then you'll never get another chunk.
|
||
|
|
||
|
|
||
|
Transform.prototype._transform = function (chunk, encoding, cb) {
|
||
|
cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()'));
|
||
|
};
|
||
|
|
||
|
Transform.prototype._write = function (chunk, encoding, cb) {
|
||
|
var ts = this._transformState;
|
||
|
ts.writecb = cb;
|
||
|
ts.writechunk = chunk;
|
||
|
ts.writeencoding = encoding;
|
||
|
|
||
|
if (!ts.transforming) {
|
||
|
var rs = this._readableState;
|
||
|
if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark);
|
||
|
}
|
||
|
}; // Doesn't matter what the args are here.
|
||
|
// _transform does all the work.
|
||
|
// That we got here means that the readable side wants more data.
|
||
|
|
||
|
|
||
|
Transform.prototype._read = function (n) {
|
||
|
var ts = this._transformState;
|
||
|
|
||
|
if (ts.writechunk !== null && !ts.transforming) {
|
||
|
ts.transforming = true;
|
||
|
|
||
|
this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
|
||
|
} else {
|
||
|
// mark that we need a transform, so that any data that comes in
|
||
|
// will get processed, now that we've asked for it.
|
||
|
ts.needTransform = true;
|
||
|
}
|
||
|
};
|
||
|
|
||
|
Transform.prototype._destroy = function (err, cb) {
|
||
|
Duplex.prototype._destroy.call(this, err, function (err2) {
|
||
|
cb(err2);
|
||
|
});
|
||
|
};
|
||
|
|
||
|
function done(stream, er, data) {
|
||
|
if (er) return stream.emit('error', er);
|
||
|
if (data != null) // single equals check for both `null` and `undefined`
|
||
|
stream.push(data); // TODO(BridgeAR): Write a test for these two error cases
|
||
|
// if there's nothing in the write buffer, then that means
|
||
|
// that nothing more will ever be provided
|
||
|
|
||
|
if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0();
|
||
|
if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING();
|
||
|
return stream.push(null);
|
||
|
}
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 64229:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
// A bit simpler than readable streams.
|
||
|
// Implement an async ._write(chunk, encoding, cb), and it'll handle all
|
||
|
// the drain event emission and buffering.
|
||
|
|
||
|
|
||
|
module.exports = Writable;
|
||
|
/* <replacement> */
|
||
|
|
||
|
function WriteReq(chunk, encoding, cb) {
|
||
|
this.chunk = chunk;
|
||
|
this.encoding = encoding;
|
||
|
this.callback = cb;
|
||
|
this.next = null;
|
||
|
} // It seems a linked list but it is not
|
||
|
// there will be only 2 of these for each stream
|
||
|
|
||
|
|
||
|
function CorkedRequest(state) {
|
||
|
var _this = this;
|
||
|
|
||
|
this.next = null;
|
||
|
this.entry = null;
|
||
|
|
||
|
this.finish = function () {
|
||
|
onCorkedFinish(_this, state);
|
||
|
};
|
||
|
}
|
||
|
/* </replacement> */
|
||
|
|
||
|
/*<replacement>*/
|
||
|
|
||
|
|
||
|
var Duplex;
|
||
|
/*</replacement>*/
|
||
|
|
||
|
Writable.WritableState = WritableState;
|
||
|
/*<replacement>*/
|
||
|
|
||
|
var internalUtil = {
|
||
|
deprecate: __webpack_require__(41159)
|
||
|
};
|
||
|
/*</replacement>*/
|
||
|
|
||
|
/*<replacement>*/
|
||
|
|
||
|
var Stream = __webpack_require__(79740);
|
||
|
/*</replacement>*/
|
||
|
|
||
|
|
||
|
var Buffer = __webpack_require__(64293).Buffer;
|
||
|
|
||
|
var OurUint8Array = global.Uint8Array || function () {};
|
||
|
|
||
|
function _uint8ArrayToBuffer(chunk) {
|
||
|
return Buffer.from(chunk);
|
||
|
}
|
||
|
|
||
|
function _isUint8Array(obj) {
|
||
|
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
|
||
|
}
|
||
|
|
||
|
var destroyImpl = __webpack_require__(61195);
|
||
|
|
||
|
var _require = __webpack_require__(82457),
|
||
|
getHighWaterMark = _require.getHighWaterMark;
|
||
|
|
||
|
var _require$codes = __webpack_require__(4012)/* .codes */ .q,
|
||
|
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
|
||
|
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
|
||
|
ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK,
|
||
|
ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE,
|
||
|
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED,
|
||
|
ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES,
|
||
|
ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END,
|
||
|
ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING;
|
||
|
|
||
|
var errorOrDestroy = destroyImpl.errorOrDestroy;
|
||
|
|
||
|
__webpack_require__(94378)(Writable, Stream);
|
||
|
|
||
|
function nop() {}
|
||
|
|
||
|
function WritableState(options, stream, isDuplex) {
|
||
|
Duplex = Duplex || __webpack_require__(56753);
|
||
|
options = options || {}; // Duplex streams are both readable and writable, but share
|
||
|
// the same options object.
|
||
|
// However, some cases require setting options to different
|
||
|
// values for the readable and the writable sides of the duplex stream,
|
||
|
// e.g. options.readableObjectMode vs. options.writableObjectMode, etc.
|
||
|
|
||
|
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag to indicate whether or not this stream
|
||
|
// contains buffers or objects.
|
||
|
|
||
|
this.objectMode = !!options.objectMode;
|
||
|
if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; // the point at which write() starts returning false
|
||
|
// Note: 0 is a valid value, means that we always return false if
|
||
|
// the entire buffer is not flushed immediately on write()
|
||
|
|
||
|
this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex); // if _final has been called
|
||
|
|
||
|
this.finalCalled = false; // drain event flag.
|
||
|
|
||
|
this.needDrain = false; // at the start of calling end()
|
||
|
|
||
|
this.ending = false; // when end() has been called, and returned
|
||
|
|
||
|
this.ended = false; // when 'finish' is emitted
|
||
|
|
||
|
this.finished = false; // has it been destroyed
|
||
|
|
||
|
this.destroyed = false; // should we decode strings into buffers before passing to _write?
|
||
|
// this is here so that some node-core streams can optimize string
|
||
|
// handling at a lower level.
|
||
|
|
||
|
var noDecode = options.decodeStrings === false;
|
||
|
this.decodeStrings = !noDecode; // Crypto is kind of old and crusty. Historically, its default string
|
||
|
// encoding is 'binary' so we have to make this configurable.
|
||
|
// Everything else in the universe uses 'utf8', though.
|
||
|
|
||
|
this.defaultEncoding = options.defaultEncoding || 'utf8'; // not an actual buffer we keep track of, but a measurement
|
||
|
// of how much we're waiting to get pushed to some underlying
|
||
|
// socket or file.
|
||
|
|
||
|
this.length = 0; // a flag to see when we're in the middle of a write.
|
||
|
|
||
|
this.writing = false; // when true all writes will be buffered until .uncork() call
|
||
|
|
||
|
this.corked = 0; // a flag to be able to tell if the onwrite cb is called immediately,
|
||
|
// or on a later tick. We set this to true at first, because any
|
||
|
// actions that shouldn't happen until "later" should generally also
|
||
|
// not happen before the first write call.
|
||
|
|
||
|
this.sync = true; // a flag to know if we're processing previously buffered items, which
|
||
|
// may call the _write() callback in the same tick, so that we don't
|
||
|
// end up in an overlapped onwrite situation.
|
||
|
|
||
|
this.bufferProcessing = false; // the callback that's passed to _write(chunk,cb)
|
||
|
|
||
|
this.onwrite = function (er) {
|
||
|
onwrite(stream, er);
|
||
|
}; // the callback that the user supplies to write(chunk,encoding,cb)
|
||
|
|
||
|
|
||
|
this.writecb = null; // the amount that is being written when _write is called.
|
||
|
|
||
|
this.writelen = 0;
|
||
|
this.bufferedRequest = null;
|
||
|
this.lastBufferedRequest = null; // number of pending user-supplied write callbacks
|
||
|
// this must be 0 before 'finish' can be emitted
|
||
|
|
||
|
this.pendingcb = 0; // emit prefinish if the only thing we're waiting for is _write cbs
|
||
|
// This is relevant for synchronous Transform streams
|
||
|
|
||
|
this.prefinished = false; // True if the error was already emitted and should not be thrown again
|
||
|
|
||
|
this.errorEmitted = false; // Should close be emitted on destroy. Defaults to true.
|
||
|
|
||
|
this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'finish' (and potentially 'end')
|
||
|
|
||
|
this.autoDestroy = !!options.autoDestroy; // count buffered requests
|
||
|
|
||
|
this.bufferedRequestCount = 0; // allocate the first CorkedRequest, there is always
|
||
|
// one allocated and free to use, and we maintain at most two
|
||
|
|
||
|
this.corkedRequestsFree = new CorkedRequest(this);
|
||
|
}
|
||
|
|
||
|
WritableState.prototype.getBuffer = function getBuffer() {
|
||
|
var current = this.bufferedRequest;
|
||
|
var out = [];
|
||
|
|
||
|
while (current) {
|
||
|
out.push(current);
|
||
|
current = current.next;
|
||
|
}
|
||
|
|
||
|
return out;
|
||
|
};
|
||
|
|
||
|
(function () {
|
||
|
try {
|
||
|
Object.defineProperty(WritableState.prototype, 'buffer', {
|
||
|
get: internalUtil.deprecate(function writableStateBufferGetter() {
|
||
|
return this.getBuffer();
|
||
|
}, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003')
|
||
|
});
|
||
|
} catch (_) {}
|
||
|
})(); // Test _writableState for inheritance to account for Duplex streams,
|
||
|
// whose prototype chain only points to Readable.
|
||
|
|
||
|
|
||
|
var realHasInstance;
|
||
|
|
||
|
if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') {
|
||
|
realHasInstance = Function.prototype[Symbol.hasInstance];
|
||
|
Object.defineProperty(Writable, Symbol.hasInstance, {
|
||
|
value: function value(object) {
|
||
|
if (realHasInstance.call(this, object)) return true;
|
||
|
if (this !== Writable) return false;
|
||
|
return object && object._writableState instanceof WritableState;
|
||
|
}
|
||
|
});
|
||
|
} else {
|
||
|
realHasInstance = function realHasInstance(object) {
|
||
|
return object instanceof this;
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function Writable(options) {
|
||
|
Duplex = Duplex || __webpack_require__(56753); // Writable ctor is applied to Duplexes, too.
|
||
|
// `realHasInstance` is necessary because using plain `instanceof`
|
||
|
// would return false, as no `_writableState` property is attached.
|
||
|
// Trying to use the custom `instanceof` for Writable here will also break the
|
||
|
// Node.js LazyTransform implementation, which has a non-trivial getter for
|
||
|
// `_writableState` that would lead to infinite recursion.
|
||
|
// Checking for a Stream.Duplex instance is faster here instead of inside
|
||
|
// the WritableState constructor, at least with V8 6.5
|
||
|
|
||
|
var isDuplex = this instanceof Duplex;
|
||
|
if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options);
|
||
|
this._writableState = new WritableState(options, this, isDuplex); // legacy.
|
||
|
|
||
|
this.writable = true;
|
||
|
|
||
|
if (options) {
|
||
|
if (typeof options.write === 'function') this._write = options.write;
|
||
|
if (typeof options.writev === 'function') this._writev = options.writev;
|
||
|
if (typeof options.destroy === 'function') this._destroy = options.destroy;
|
||
|
if (typeof options.final === 'function') this._final = options.final;
|
||
|
}
|
||
|
|
||
|
Stream.call(this);
|
||
|
} // Otherwise people can pipe Writable streams, which is just wrong.
|
||
|
|
||
|
|
||
|
Writable.prototype.pipe = function () {
|
||
|
errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE());
|
||
|
};
|
||
|
|
||
|
function writeAfterEnd(stream, cb) {
|
||
|
var er = new ERR_STREAM_WRITE_AFTER_END(); // TODO: defer error events consistently everywhere, not just the cb
|
||
|
|
||
|
errorOrDestroy(stream, er);
|
||
|
process.nextTick(cb, er);
|
||
|
} // Checks that a user-supplied chunk is valid, especially for the particular
|
||
|
// mode the stream is in. Currently this means that `null` is never accepted
|
||
|
// and undefined/non-string values are only allowed in object mode.
|
||
|
|
||
|
|
||
|
function validChunk(stream, state, chunk, cb) {
|
||
|
var er;
|
||
|
|
||
|
if (chunk === null) {
|
||
|
er = new ERR_STREAM_NULL_VALUES();
|
||
|
} else if (typeof chunk !== 'string' && !state.objectMode) {
|
||
|
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk);
|
||
|
}
|
||
|
|
||
|
if (er) {
|
||
|
errorOrDestroy(stream, er);
|
||
|
process.nextTick(cb, er);
|
||
|
return false;
|
||
|
}
|
||
|
|
||
|
return true;
|
||
|
}
|
||
|
|
||
|
Writable.prototype.write = function (chunk, encoding, cb) {
|
||
|
var state = this._writableState;
|
||
|
var ret = false;
|
||
|
|
||
|
var isBuf = !state.objectMode && _isUint8Array(chunk);
|
||
|
|
||
|
if (isBuf && !Buffer.isBuffer(chunk)) {
|
||
|
chunk = _uint8ArrayToBuffer(chunk);
|
||
|
}
|
||
|
|
||
|
if (typeof encoding === 'function') {
|
||
|
cb = encoding;
|
||
|
encoding = null;
|
||
|
}
|
||
|
|
||
|
if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding;
|
||
|
if (typeof cb !== 'function') cb = nop;
|
||
|
if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) {
|
||
|
state.pendingcb++;
|
||
|
ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);
|
||
|
}
|
||
|
return ret;
|
||
|
};
|
||
|
|
||
|
Writable.prototype.cork = function () {
|
||
|
this._writableState.corked++;
|
||
|
};
|
||
|
|
||
|
Writable.prototype.uncork = function () {
|
||
|
var state = this._writableState;
|
||
|
|
||
|
if (state.corked) {
|
||
|
state.corked--;
|
||
|
if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state);
|
||
|
}
|
||
|
};
|
||
|
|
||
|
Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) {
|
||
|
// node::ParseEncoding() requires lower case.
|
||
|
if (typeof encoding === 'string') encoding = encoding.toLowerCase();
|
||
|
if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding);
|
||
|
this._writableState.defaultEncoding = encoding;
|
||
|
return this;
|
||
|
};
|
||
|
|
||
|
Object.defineProperty(Writable.prototype, 'writableBuffer', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._writableState && this._writableState.getBuffer();
|
||
|
}
|
||
|
});
|
||
|
|
||
|
function decodeChunk(state, chunk, encoding) {
|
||
|
if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') {
|
||
|
chunk = Buffer.from(chunk, encoding);
|
||
|
}
|
||
|
|
||
|
return chunk;
|
||
|
}
|
||
|
|
||
|
Object.defineProperty(Writable.prototype, 'writableHighWaterMark', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._writableState.highWaterMark;
|
||
|
}
|
||
|
}); // if we're already writing something, then just put this
|
||
|
// in the queue, and wait our turn. Otherwise, call _write
|
||
|
// If we return false, then we need a drain event, so set that flag.
|
||
|
|
||
|
function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) {
|
||
|
if (!isBuf) {
|
||
|
var newChunk = decodeChunk(state, chunk, encoding);
|
||
|
|
||
|
if (chunk !== newChunk) {
|
||
|
isBuf = true;
|
||
|
encoding = 'buffer';
|
||
|
chunk = newChunk;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
var len = state.objectMode ? 1 : chunk.length;
|
||
|
state.length += len;
|
||
|
var ret = state.length < state.highWaterMark; // we must ensure that previous needDrain will not be reset to false.
|
||
|
|
||
|
if (!ret) state.needDrain = true;
|
||
|
|
||
|
if (state.writing || state.corked) {
|
||
|
var last = state.lastBufferedRequest;
|
||
|
state.lastBufferedRequest = {
|
||
|
chunk: chunk,
|
||
|
encoding: encoding,
|
||
|
isBuf: isBuf,
|
||
|
callback: cb,
|
||
|
next: null
|
||
|
};
|
||
|
|
||
|
if (last) {
|
||
|
last.next = state.lastBufferedRequest;
|
||
|
} else {
|
||
|
state.bufferedRequest = state.lastBufferedRequest;
|
||
|
}
|
||
|
|
||
|
state.bufferedRequestCount += 1;
|
||
|
} else {
|
||
|
doWrite(stream, state, false, len, chunk, encoding, cb);
|
||
|
}
|
||
|
|
||
|
return ret;
|
||
|
}
|
||
|
|
||
|
function doWrite(stream, state, writev, len, chunk, encoding, cb) {
|
||
|
state.writelen = len;
|
||
|
state.writecb = cb;
|
||
|
state.writing = true;
|
||
|
state.sync = true;
|
||
|
if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite);
|
||
|
state.sync = false;
|
||
|
}
|
||
|
|
||
|
function onwriteError(stream, state, sync, er, cb) {
|
||
|
--state.pendingcb;
|
||
|
|
||
|
if (sync) {
|
||
|
// defer the callback if we are being called synchronously
|
||
|
// to avoid piling up things on the stack
|
||
|
process.nextTick(cb, er); // this can emit finish, and it will always happen
|
||
|
// after error
|
||
|
|
||
|
process.nextTick(finishMaybe, stream, state);
|
||
|
stream._writableState.errorEmitted = true;
|
||
|
errorOrDestroy(stream, er);
|
||
|
} else {
|
||
|
// the caller expect this to happen before if
|
||
|
// it is async
|
||
|
cb(er);
|
||
|
stream._writableState.errorEmitted = true;
|
||
|
errorOrDestroy(stream, er); // this can emit finish, but finish must
|
||
|
// always follow error
|
||
|
|
||
|
finishMaybe(stream, state);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function onwriteStateUpdate(state) {
|
||
|
state.writing = false;
|
||
|
state.writecb = null;
|
||
|
state.length -= state.writelen;
|
||
|
state.writelen = 0;
|
||
|
}
|
||
|
|
||
|
function onwrite(stream, er) {
|
||
|
var state = stream._writableState;
|
||
|
var sync = state.sync;
|
||
|
var cb = state.writecb;
|
||
|
if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK();
|
||
|
onwriteStateUpdate(state);
|
||
|
if (er) onwriteError(stream, state, sync, er, cb);else {
|
||
|
// Check if we're actually ready to finish, but don't emit yet
|
||
|
var finished = needFinish(state) || stream.destroyed;
|
||
|
|
||
|
if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) {
|
||
|
clearBuffer(stream, state);
|
||
|
}
|
||
|
|
||
|
if (sync) {
|
||
|
process.nextTick(afterWrite, stream, state, finished, cb);
|
||
|
} else {
|
||
|
afterWrite(stream, state, finished, cb);
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function afterWrite(stream, state, finished, cb) {
|
||
|
if (!finished) onwriteDrain(stream, state);
|
||
|
state.pendingcb--;
|
||
|
cb();
|
||
|
finishMaybe(stream, state);
|
||
|
} // Must force callback to be called on nextTick, so that we don't
|
||
|
// emit 'drain' before the write() consumer gets the 'false' return
|
||
|
// value, and has a chance to attach a 'drain' listener.
|
||
|
|
||
|
|
||
|
function onwriteDrain(stream, state) {
|
||
|
if (state.length === 0 && state.needDrain) {
|
||
|
state.needDrain = false;
|
||
|
stream.emit('drain');
|
||
|
}
|
||
|
} // if there's something in the buffer waiting, then process it
|
||
|
|
||
|
|
||
|
function clearBuffer(stream, state) {
|
||
|
state.bufferProcessing = true;
|
||
|
var entry = state.bufferedRequest;
|
||
|
|
||
|
if (stream._writev && entry && entry.next) {
|
||
|
// Fast case, write everything using _writev()
|
||
|
var l = state.bufferedRequestCount;
|
||
|
var buffer = new Array(l);
|
||
|
var holder = state.corkedRequestsFree;
|
||
|
holder.entry = entry;
|
||
|
var count = 0;
|
||
|
var allBuffers = true;
|
||
|
|
||
|
while (entry) {
|
||
|
buffer[count] = entry;
|
||
|
if (!entry.isBuf) allBuffers = false;
|
||
|
entry = entry.next;
|
||
|
count += 1;
|
||
|
}
|
||
|
|
||
|
buffer.allBuffers = allBuffers;
|
||
|
doWrite(stream, state, true, state.length, buffer, '', holder.finish); // doWrite is almost always async, defer these to save a bit of time
|
||
|
// as the hot path ends with doWrite
|
||
|
|
||
|
state.pendingcb++;
|
||
|
state.lastBufferedRequest = null;
|
||
|
|
||
|
if (holder.next) {
|
||
|
state.corkedRequestsFree = holder.next;
|
||
|
holder.next = null;
|
||
|
} else {
|
||
|
state.corkedRequestsFree = new CorkedRequest(state);
|
||
|
}
|
||
|
|
||
|
state.bufferedRequestCount = 0;
|
||
|
} else {
|
||
|
// Slow case, write chunks one-by-one
|
||
|
while (entry) {
|
||
|
var chunk = entry.chunk;
|
||
|
var encoding = entry.encoding;
|
||
|
var cb = entry.callback;
|
||
|
var len = state.objectMode ? 1 : chunk.length;
|
||
|
doWrite(stream, state, false, len, chunk, encoding, cb);
|
||
|
entry = entry.next;
|
||
|
state.bufferedRequestCount--; // if we didn't call the onwrite immediately, then
|
||
|
// it means that we need to wait until it does.
|
||
|
// also, that means that the chunk and cb are currently
|
||
|
// being processed, so move the buffer counter past them.
|
||
|
|
||
|
if (state.writing) {
|
||
|
break;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
if (entry === null) state.lastBufferedRequest = null;
|
||
|
}
|
||
|
|
||
|
state.bufferedRequest = entry;
|
||
|
state.bufferProcessing = false;
|
||
|
}
|
||
|
|
||
|
Writable.prototype._write = function (chunk, encoding, cb) {
|
||
|
cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()'));
|
||
|
};
|
||
|
|
||
|
Writable.prototype._writev = null;
|
||
|
|
||
|
Writable.prototype.end = function (chunk, encoding, cb) {
|
||
|
var state = this._writableState;
|
||
|
|
||
|
if (typeof chunk === 'function') {
|
||
|
cb = chunk;
|
||
|
chunk = null;
|
||
|
encoding = null;
|
||
|
} else if (typeof encoding === 'function') {
|
||
|
cb = encoding;
|
||
|
encoding = null;
|
||
|
}
|
||
|
|
||
|
if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); // .end() fully uncorks
|
||
|
|
||
|
if (state.corked) {
|
||
|
state.corked = 1;
|
||
|
this.uncork();
|
||
|
} // ignore unnecessary end() calls.
|
||
|
|
||
|
|
||
|
if (!state.ending) endWritable(this, state, cb);
|
||
|
return this;
|
||
|
};
|
||
|
|
||
|
Object.defineProperty(Writable.prototype, 'writableLength', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
return this._writableState.length;
|
||
|
}
|
||
|
});
|
||
|
|
||
|
function needFinish(state) {
|
||
|
return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing;
|
||
|
}
|
||
|
|
||
|
function callFinal(stream, state) {
|
||
|
stream._final(function (err) {
|
||
|
state.pendingcb--;
|
||
|
|
||
|
if (err) {
|
||
|
errorOrDestroy(stream, err);
|
||
|
}
|
||
|
|
||
|
state.prefinished = true;
|
||
|
stream.emit('prefinish');
|
||
|
finishMaybe(stream, state);
|
||
|
});
|
||
|
}
|
||
|
|
||
|
function prefinish(stream, state) {
|
||
|
if (!state.prefinished && !state.finalCalled) {
|
||
|
if (typeof stream._final === 'function' && !state.destroyed) {
|
||
|
state.pendingcb++;
|
||
|
state.finalCalled = true;
|
||
|
process.nextTick(callFinal, stream, state);
|
||
|
} else {
|
||
|
state.prefinished = true;
|
||
|
stream.emit('prefinish');
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function finishMaybe(stream, state) {
|
||
|
var need = needFinish(state);
|
||
|
|
||
|
if (need) {
|
||
|
prefinish(stream, state);
|
||
|
|
||
|
if (state.pendingcb === 0) {
|
||
|
state.finished = true;
|
||
|
stream.emit('finish');
|
||
|
|
||
|
if (state.autoDestroy) {
|
||
|
// In case of duplex streams we need a way to detect
|
||
|
// if the readable side is ready for autoDestroy as well
|
||
|
var rState = stream._readableState;
|
||
|
|
||
|
if (!rState || rState.autoDestroy && rState.endEmitted) {
|
||
|
stream.destroy();
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
return need;
|
||
|
}
|
||
|
|
||
|
function endWritable(stream, state, cb) {
|
||
|
state.ending = true;
|
||
|
finishMaybe(stream, state);
|
||
|
|
||
|
if (cb) {
|
||
|
if (state.finished) process.nextTick(cb);else stream.once('finish', cb);
|
||
|
}
|
||
|
|
||
|
state.ended = true;
|
||
|
stream.writable = false;
|
||
|
}
|
||
|
|
||
|
function onCorkedFinish(corkReq, state, err) {
|
||
|
var entry = corkReq.entry;
|
||
|
corkReq.entry = null;
|
||
|
|
||
|
while (entry) {
|
||
|
var cb = entry.callback;
|
||
|
state.pendingcb--;
|
||
|
cb(err);
|
||
|
entry = entry.next;
|
||
|
} // reuse the free corkReq.
|
||
|
|
||
|
|
||
|
state.corkedRequestsFree.next = corkReq;
|
||
|
}
|
||
|
|
||
|
Object.defineProperty(Writable.prototype, 'destroyed', {
|
||
|
// making it explicit this property is not enumerable
|
||
|
// because otherwise some prototype manipulation in
|
||
|
// userland will fail
|
||
|
enumerable: false,
|
||
|
get: function get() {
|
||
|
if (this._writableState === undefined) {
|
||
|
return false;
|
||
|
}
|
||
|
|
||
|
return this._writableState.destroyed;
|
||
|
},
|
||
|
set: function set(value) {
|
||
|
// we ignore the value if the stream
|
||
|
// has not been initialized yet
|
||
|
if (!this._writableState) {
|
||
|
return;
|
||
|
} // backward compatibility, the user is explicitly
|
||
|
// managing destroyed
|
||
|
|
||
|
|
||
|
this._writableState.destroyed = value;
|
||
|
}
|
||
|
});
|
||
|
Writable.prototype.destroy = destroyImpl.destroy;
|
||
|
Writable.prototype._undestroy = destroyImpl.undestroy;
|
||
|
|
||
|
Writable.prototype._destroy = function (err, cb) {
|
||
|
cb(err);
|
||
|
};
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 45850:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
var _Object$setPrototypeO;
|
||
|
|
||
|
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
|
||
|
|
||
|
var finished = __webpack_require__(8610);
|
||
|
|
||
|
var kLastResolve = Symbol('lastResolve');
|
||
|
var kLastReject = Symbol('lastReject');
|
||
|
var kError = Symbol('error');
|
||
|
var kEnded = Symbol('ended');
|
||
|
var kLastPromise = Symbol('lastPromise');
|
||
|
var kHandlePromise = Symbol('handlePromise');
|
||
|
var kStream = Symbol('stream');
|
||
|
|
||
|
function createIterResult(value, done) {
|
||
|
return {
|
||
|
value: value,
|
||
|
done: done
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function readAndResolve(iter) {
|
||
|
var resolve = iter[kLastResolve];
|
||
|
|
||
|
if (resolve !== null) {
|
||
|
var data = iter[kStream].read(); // we defer if data is null
|
||
|
// we can be expecting either 'end' or
|
||
|
// 'error'
|
||
|
|
||
|
if (data !== null) {
|
||
|
iter[kLastPromise] = null;
|
||
|
iter[kLastResolve] = null;
|
||
|
iter[kLastReject] = null;
|
||
|
resolve(createIterResult(data, false));
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function onReadable(iter) {
|
||
|
// we wait for the next tick, because it might
|
||
|
// emit an error with process.nextTick
|
||
|
process.nextTick(readAndResolve, iter);
|
||
|
}
|
||
|
|
||
|
function wrapForNext(lastPromise, iter) {
|
||
|
return function (resolve, reject) {
|
||
|
lastPromise.then(function () {
|
||
|
if (iter[kEnded]) {
|
||
|
resolve(createIterResult(undefined, true));
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
iter[kHandlePromise](resolve, reject);
|
||
|
}, reject);
|
||
|
};
|
||
|
}
|
||
|
|
||
|
var AsyncIteratorPrototype = Object.getPrototypeOf(function () {});
|
||
|
var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = {
|
||
|
get stream() {
|
||
|
return this[kStream];
|
||
|
},
|
||
|
|
||
|
next: function next() {
|
||
|
var _this = this;
|
||
|
|
||
|
// if we have detected an error in the meanwhile
|
||
|
// reject straight away
|
||
|
var error = this[kError];
|
||
|
|
||
|
if (error !== null) {
|
||
|
return Promise.reject(error);
|
||
|
}
|
||
|
|
||
|
if (this[kEnded]) {
|
||
|
return Promise.resolve(createIterResult(undefined, true));
|
||
|
}
|
||
|
|
||
|
if (this[kStream].destroyed) {
|
||
|
// We need to defer via nextTick because if .destroy(err) is
|
||
|
// called, the error will be emitted via nextTick, and
|
||
|
// we cannot guarantee that there is no error lingering around
|
||
|
// waiting to be emitted.
|
||
|
return new Promise(function (resolve, reject) {
|
||
|
process.nextTick(function () {
|
||
|
if (_this[kError]) {
|
||
|
reject(_this[kError]);
|
||
|
} else {
|
||
|
resolve(createIterResult(undefined, true));
|
||
|
}
|
||
|
});
|
||
|
});
|
||
|
} // if we have multiple next() calls
|
||
|
// we will wait for the previous Promise to finish
|
||
|
// this logic is optimized to support for await loops,
|
||
|
// where next() is only called once at a time
|
||
|
|
||
|
|
||
|
var lastPromise = this[kLastPromise];
|
||
|
var promise;
|
||
|
|
||
|
if (lastPromise) {
|
||
|
promise = new Promise(wrapForNext(lastPromise, this));
|
||
|
} else {
|
||
|
// fast path needed to support multiple this.push()
|
||
|
// without triggering the next() queue
|
||
|
var data = this[kStream].read();
|
||
|
|
||
|
if (data !== null) {
|
||
|
return Promise.resolve(createIterResult(data, false));
|
||
|
}
|
||
|
|
||
|
promise = new Promise(this[kHandlePromise]);
|
||
|
}
|
||
|
|
||
|
this[kLastPromise] = promise;
|
||
|
return promise;
|
||
|
}
|
||
|
}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () {
|
||
|
return this;
|
||
|
}), _defineProperty(_Object$setPrototypeO, "return", function _return() {
|
||
|
var _this2 = this;
|
||
|
|
||
|
// destroy(err, cb) is a private API
|
||
|
// we can guarantee we have that here, because we control the
|
||
|
// Readable class this is attached to
|
||
|
return new Promise(function (resolve, reject) {
|
||
|
_this2[kStream].destroy(null, function (err) {
|
||
|
if (err) {
|
||
|
reject(err);
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
resolve(createIterResult(undefined, true));
|
||
|
});
|
||
|
});
|
||
|
}), _Object$setPrototypeO), AsyncIteratorPrototype);
|
||
|
|
||
|
var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) {
|
||
|
var _Object$create;
|
||
|
|
||
|
var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, {
|
||
|
value: stream,
|
||
|
writable: true
|
||
|
}), _defineProperty(_Object$create, kLastResolve, {
|
||
|
value: null,
|
||
|
writable: true
|
||
|
}), _defineProperty(_Object$create, kLastReject, {
|
||
|
value: null,
|
||
|
writable: true
|
||
|
}), _defineProperty(_Object$create, kError, {
|
||
|
value: null,
|
||
|
writable: true
|
||
|
}), _defineProperty(_Object$create, kEnded, {
|
||
|
value: stream._readableState.endEmitted,
|
||
|
writable: true
|
||
|
}), _defineProperty(_Object$create, kHandlePromise, {
|
||
|
value: function value(resolve, reject) {
|
||
|
var data = iterator[kStream].read();
|
||
|
|
||
|
if (data) {
|
||
|
iterator[kLastPromise] = null;
|
||
|
iterator[kLastResolve] = null;
|
||
|
iterator[kLastReject] = null;
|
||
|
resolve(createIterResult(data, false));
|
||
|
} else {
|
||
|
iterator[kLastResolve] = resolve;
|
||
|
iterator[kLastReject] = reject;
|
||
|
}
|
||
|
},
|
||
|
writable: true
|
||
|
}), _Object$create));
|
||
|
iterator[kLastPromise] = null;
|
||
|
finished(stream, function (err) {
|
||
|
if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') {
|
||
|
var reject = iterator[kLastReject]; // reject if we are waiting for data in the Promise
|
||
|
// returned by next() and store the error
|
||
|
|
||
|
if (reject !== null) {
|
||
|
iterator[kLastPromise] = null;
|
||
|
iterator[kLastResolve] = null;
|
||
|
iterator[kLastReject] = null;
|
||
|
reject(err);
|
||
|
}
|
||
|
|
||
|
iterator[kError] = err;
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
var resolve = iterator[kLastResolve];
|
||
|
|
||
|
if (resolve !== null) {
|
||
|
iterator[kLastPromise] = null;
|
||
|
iterator[kLastResolve] = null;
|
||
|
iterator[kLastReject] = null;
|
||
|
resolve(createIterResult(undefined, true));
|
||
|
}
|
||
|
|
||
|
iterator[kEnded] = true;
|
||
|
});
|
||
|
stream.on('readable', onReadable.bind(null, iterator));
|
||
|
return iterator;
|
||
|
};
|
||
|
|
||
|
module.exports = createReadableStreamAsyncIterator;
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 57327:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; }
|
||
|
|
||
|
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
|
||
|
|
||
|
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
|
||
|
|
||
|
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
|
||
|
|
||
|
function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }
|
||
|
|
||
|
function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }
|
||
|
|
||
|
var _require = __webpack_require__(64293),
|
||
|
Buffer = _require.Buffer;
|
||
|
|
||
|
var _require2 = __webpack_require__(31669),
|
||
|
inspect = _require2.inspect;
|
||
|
|
||
|
var custom = inspect && inspect.custom || 'inspect';
|
||
|
|
||
|
function copyBuffer(src, target, offset) {
|
||
|
Buffer.prototype.copy.call(src, target, offset);
|
||
|
}
|
||
|
|
||
|
module.exports =
|
||
|
/*#__PURE__*/
|
||
|
function () {
|
||
|
function BufferList() {
|
||
|
_classCallCheck(this, BufferList);
|
||
|
|
||
|
this.head = null;
|
||
|
this.tail = null;
|
||
|
this.length = 0;
|
||
|
}
|
||
|
|
||
|
_createClass(BufferList, [{
|
||
|
key: "push",
|
||
|
value: function push(v) {
|
||
|
var entry = {
|
||
|
data: v,
|
||
|
next: null
|
||
|
};
|
||
|
if (this.length > 0) this.tail.next = entry;else this.head = entry;
|
||
|
this.tail = entry;
|
||
|
++this.length;
|
||
|
}
|
||
|
}, {
|
||
|
key: "unshift",
|
||
|
value: function unshift(v) {
|
||
|
var entry = {
|
||
|
data: v,
|
||
|
next: this.head
|
||
|
};
|
||
|
if (this.length === 0) this.tail = entry;
|
||
|
this.head = entry;
|
||
|
++this.length;
|
||
|
}
|
||
|
}, {
|
||
|
key: "shift",
|
||
|
value: function shift() {
|
||
|
if (this.length === 0) return;
|
||
|
var ret = this.head.data;
|
||
|
if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next;
|
||
|
--this.length;
|
||
|
return ret;
|
||
|
}
|
||
|
}, {
|
||
|
key: "clear",
|
||
|
value: function clear() {
|
||
|
this.head = this.tail = null;
|
||
|
this.length = 0;
|
||
|
}
|
||
|
}, {
|
||
|
key: "join",
|
||
|
value: function join(s) {
|
||
|
if (this.length === 0) return '';
|
||
|
var p = this.head;
|
||
|
var ret = '' + p.data;
|
||
|
|
||
|
while (p = p.next) {
|
||
|
ret += s + p.data;
|
||
|
}
|
||
|
|
||
|
return ret;
|
||
|
}
|
||
|
}, {
|
||
|
key: "concat",
|
||
|
value: function concat(n) {
|
||
|
if (this.length === 0) return Buffer.alloc(0);
|
||
|
var ret = Buffer.allocUnsafe(n >>> 0);
|
||
|
var p = this.head;
|
||
|
var i = 0;
|
||
|
|
||
|
while (p) {
|
||
|
copyBuffer(p.data, ret, i);
|
||
|
i += p.data.length;
|
||
|
p = p.next;
|
||
|
}
|
||
|
|
||
|
return ret;
|
||
|
} // Consumes a specified amount of bytes or characters from the buffered data.
|
||
|
|
||
|
}, {
|
||
|
key: "consume",
|
||
|
value: function consume(n, hasStrings) {
|
||
|
var ret;
|
||
|
|
||
|
if (n < this.head.data.length) {
|
||
|
// `slice` is the same for buffers and strings.
|
||
|
ret = this.head.data.slice(0, n);
|
||
|
this.head.data = this.head.data.slice(n);
|
||
|
} else if (n === this.head.data.length) {
|
||
|
// First chunk is a perfect match.
|
||
|
ret = this.shift();
|
||
|
} else {
|
||
|
// Result spans more than one buffer.
|
||
|
ret = hasStrings ? this._getString(n) : this._getBuffer(n);
|
||
|
}
|
||
|
|
||
|
return ret;
|
||
|
}
|
||
|
}, {
|
||
|
key: "first",
|
||
|
value: function first() {
|
||
|
return this.head.data;
|
||
|
} // Consumes a specified amount of characters from the buffered data.
|
||
|
|
||
|
}, {
|
||
|
key: "_getString",
|
||
|
value: function _getString(n) {
|
||
|
var p = this.head;
|
||
|
var c = 1;
|
||
|
var ret = p.data;
|
||
|
n -= ret.length;
|
||
|
|
||
|
while (p = p.next) {
|
||
|
var str = p.data;
|
||
|
var nb = n > str.length ? str.length : n;
|
||
|
if (nb === str.length) ret += str;else ret += str.slice(0, n);
|
||
|
n -= nb;
|
||
|
|
||
|
if (n === 0) {
|
||
|
if (nb === str.length) {
|
||
|
++c;
|
||
|
if (p.next) this.head = p.next;else this.head = this.tail = null;
|
||
|
} else {
|
||
|
this.head = p;
|
||
|
p.data = str.slice(nb);
|
||
|
}
|
||
|
|
||
|
break;
|
||
|
}
|
||
|
|
||
|
++c;
|
||
|
}
|
||
|
|
||
|
this.length -= c;
|
||
|
return ret;
|
||
|
} // Consumes a specified amount of bytes from the buffered data.
|
||
|
|
||
|
}, {
|
||
|
key: "_getBuffer",
|
||
|
value: function _getBuffer(n) {
|
||
|
var ret = Buffer.allocUnsafe(n);
|
||
|
var p = this.head;
|
||
|
var c = 1;
|
||
|
p.data.copy(ret);
|
||
|
n -= p.data.length;
|
||
|
|
||
|
while (p = p.next) {
|
||
|
var buf = p.data;
|
||
|
var nb = n > buf.length ? buf.length : n;
|
||
|
buf.copy(ret, ret.length - n, 0, nb);
|
||
|
n -= nb;
|
||
|
|
||
|
if (n === 0) {
|
||
|
if (nb === buf.length) {
|
||
|
++c;
|
||
|
if (p.next) this.head = p.next;else this.head = this.tail = null;
|
||
|
} else {
|
||
|
this.head = p;
|
||
|
p.data = buf.slice(nb);
|
||
|
}
|
||
|
|
||
|
break;
|
||
|
}
|
||
|
|
||
|
++c;
|
||
|
}
|
||
|
|
||
|
this.length -= c;
|
||
|
return ret;
|
||
|
} // Make sure the linked list only shows the minimal necessary information.
|
||
|
|
||
|
}, {
|
||
|
key: custom,
|
||
|
value: function value(_, options) {
|
||
|
return inspect(this, _objectSpread({}, options, {
|
||
|
// Only inspect one level.
|
||
|
depth: 0,
|
||
|
// It should not recurse.
|
||
|
customInspect: false
|
||
|
}));
|
||
|
}
|
||
|
}]);
|
||
|
|
||
|
return BufferList;
|
||
|
}();
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 61195:
|
||
|
/***/ ((module) => {
|
||
|
|
||
|
"use strict";
|
||
|
// undocumented cb() API, needed for core, not for public API
|
||
|
|
||
|
function destroy(err, cb) {
|
||
|
var _this = this;
|
||
|
|
||
|
var readableDestroyed = this._readableState && this._readableState.destroyed;
|
||
|
var writableDestroyed = this._writableState && this._writableState.destroyed;
|
||
|
|
||
|
if (readableDestroyed || writableDestroyed) {
|
||
|
if (cb) {
|
||
|
cb(err);
|
||
|
} else if (err) {
|
||
|
if (!this._writableState) {
|
||
|
process.nextTick(emitErrorNT, this, err);
|
||
|
} else if (!this._writableState.errorEmitted) {
|
||
|
this._writableState.errorEmitted = true;
|
||
|
process.nextTick(emitErrorNT, this, err);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
return this;
|
||
|
} // we set destroyed to true before firing error callbacks in order
|
||
|
// to make it re-entrance safe in case destroy() is called within callbacks
|
||
|
|
||
|
|
||
|
if (this._readableState) {
|
||
|
this._readableState.destroyed = true;
|
||
|
} // if this is a duplex stream mark the writable part as destroyed as well
|
||
|
|
||
|
|
||
|
if (this._writableState) {
|
||
|
this._writableState.destroyed = true;
|
||
|
}
|
||
|
|
||
|
this._destroy(err || null, function (err) {
|
||
|
if (!cb && err) {
|
||
|
if (!_this._writableState) {
|
||
|
process.nextTick(emitErrorAndCloseNT, _this, err);
|
||
|
} else if (!_this._writableState.errorEmitted) {
|
||
|
_this._writableState.errorEmitted = true;
|
||
|
process.nextTick(emitErrorAndCloseNT, _this, err);
|
||
|
} else {
|
||
|
process.nextTick(emitCloseNT, _this);
|
||
|
}
|
||
|
} else if (cb) {
|
||
|
process.nextTick(emitCloseNT, _this);
|
||
|
cb(err);
|
||
|
} else {
|
||
|
process.nextTick(emitCloseNT, _this);
|
||
|
}
|
||
|
});
|
||
|
|
||
|
return this;
|
||
|
}
|
||
|
|
||
|
function emitErrorAndCloseNT(self, err) {
|
||
|
emitErrorNT(self, err);
|
||
|
emitCloseNT(self);
|
||
|
}
|
||
|
|
||
|
function emitCloseNT(self) {
|
||
|
if (self._writableState && !self._writableState.emitClose) return;
|
||
|
if (self._readableState && !self._readableState.emitClose) return;
|
||
|
self.emit('close');
|
||
|
}
|
||
|
|
||
|
function undestroy() {
|
||
|
if (this._readableState) {
|
||
|
this._readableState.destroyed = false;
|
||
|
this._readableState.reading = false;
|
||
|
this._readableState.ended = false;
|
||
|
this._readableState.endEmitted = false;
|
||
|
}
|
||
|
|
||
|
if (this._writableState) {
|
||
|
this._writableState.destroyed = false;
|
||
|
this._writableState.ended = false;
|
||
|
this._writableState.ending = false;
|
||
|
this._writableState.finalCalled = false;
|
||
|
this._writableState.prefinished = false;
|
||
|
this._writableState.finished = false;
|
||
|
this._writableState.errorEmitted = false;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
function emitErrorNT(self, err) {
|
||
|
self.emit('error', err);
|
||
|
}
|
||
|
|
||
|
function errorOrDestroy(stream, err) {
|
||
|
// We have tests that rely on errors being emitted
|
||
|
// in the same tick, so changing this is semver major.
|
||
|
// For now when you opt-in to autoDestroy we allow
|
||
|
// the error to be emitted nextTick. In a future
|
||
|
// semver major update we should change the default to this.
|
||
|
var rState = stream._readableState;
|
||
|
var wState = stream._writableState;
|
||
|
if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err);
|
||
|
}
|
||
|
|
||
|
module.exports = {
|
||
|
destroy: destroy,
|
||
|
undestroy: undestroy,
|
||
|
errorOrDestroy: errorOrDestroy
|
||
|
};
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 8610:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Ported from https://github.com/mafintosh/end-of-stream with
|
||
|
// permission from the author, Mathias Buus (@mafintosh).
|
||
|
|
||
|
|
||
|
var ERR_STREAM_PREMATURE_CLOSE = __webpack_require__(4012)/* .codes.ERR_STREAM_PREMATURE_CLOSE */ .q.ERR_STREAM_PREMATURE_CLOSE;
|
||
|
|
||
|
function once(callback) {
|
||
|
var called = false;
|
||
|
return function () {
|
||
|
if (called) return;
|
||
|
called = true;
|
||
|
|
||
|
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
|
||
|
args[_key] = arguments[_key];
|
||
|
}
|
||
|
|
||
|
callback.apply(this, args);
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function noop() {}
|
||
|
|
||
|
function isRequest(stream) {
|
||
|
return stream.setHeader && typeof stream.abort === 'function';
|
||
|
}
|
||
|
|
||
|
function eos(stream, opts, callback) {
|
||
|
if (typeof opts === 'function') return eos(stream, null, opts);
|
||
|
if (!opts) opts = {};
|
||
|
callback = once(callback || noop);
|
||
|
var readable = opts.readable || opts.readable !== false && stream.readable;
|
||
|
var writable = opts.writable || opts.writable !== false && stream.writable;
|
||
|
|
||
|
var onlegacyfinish = function onlegacyfinish() {
|
||
|
if (!stream.writable) onfinish();
|
||
|
};
|
||
|
|
||
|
var writableEnded = stream._writableState && stream._writableState.finished;
|
||
|
|
||
|
var onfinish = function onfinish() {
|
||
|
writable = false;
|
||
|
writableEnded = true;
|
||
|
if (!readable) callback.call(stream);
|
||
|
};
|
||
|
|
||
|
var readableEnded = stream._readableState && stream._readableState.endEmitted;
|
||
|
|
||
|
var onend = function onend() {
|
||
|
readable = false;
|
||
|
readableEnded = true;
|
||
|
if (!writable) callback.call(stream);
|
||
|
};
|
||
|
|
||
|
var onerror = function onerror(err) {
|
||
|
callback.call(stream, err);
|
||
|
};
|
||
|
|
||
|
var onclose = function onclose() {
|
||
|
var err;
|
||
|
|
||
|
if (readable && !readableEnded) {
|
||
|
if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
|
||
|
return callback.call(stream, err);
|
||
|
}
|
||
|
|
||
|
if (writable && !writableEnded) {
|
||
|
if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
|
||
|
return callback.call(stream, err);
|
||
|
}
|
||
|
};
|
||
|
|
||
|
var onrequest = function onrequest() {
|
||
|
stream.req.on('finish', onfinish);
|
||
|
};
|
||
|
|
||
|
if (isRequest(stream)) {
|
||
|
stream.on('complete', onfinish);
|
||
|
stream.on('abort', onclose);
|
||
|
if (stream.req) onrequest();else stream.on('request', onrequest);
|
||
|
} else if (writable && !stream._writableState) {
|
||
|
// legacy streams
|
||
|
stream.on('end', onlegacyfinish);
|
||
|
stream.on('close', onlegacyfinish);
|
||
|
}
|
||
|
|
||
|
stream.on('end', onend);
|
||
|
stream.on('finish', onfinish);
|
||
|
if (opts.error !== false) stream.on('error', onerror);
|
||
|
stream.on('close', onclose);
|
||
|
return function () {
|
||
|
stream.removeListener('complete', onfinish);
|
||
|
stream.removeListener('abort', onclose);
|
||
|
stream.removeListener('request', onrequest);
|
||
|
if (stream.req) stream.req.removeListener('finish', onfinish);
|
||
|
stream.removeListener('end', onlegacyfinish);
|
||
|
stream.removeListener('close', onlegacyfinish);
|
||
|
stream.removeListener('finish', onfinish);
|
||
|
stream.removeListener('end', onend);
|
||
|
stream.removeListener('error', onerror);
|
||
|
stream.removeListener('close', onclose);
|
||
|
};
|
||
|
}
|
||
|
|
||
|
module.exports = eos;
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 96307:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
|
||
|
|
||
|
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
|
||
|
|
||
|
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; }
|
||
|
|
||
|
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; }
|
||
|
|
||
|
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
|
||
|
|
||
|
var ERR_INVALID_ARG_TYPE = __webpack_require__(4012)/* .codes.ERR_INVALID_ARG_TYPE */ .q.ERR_INVALID_ARG_TYPE;
|
||
|
|
||
|
function from(Readable, iterable, opts) {
|
||
|
var iterator;
|
||
|
|
||
|
if (iterable && typeof iterable.next === 'function') {
|
||
|
iterator = iterable;
|
||
|
} else if (iterable && iterable[Symbol.asyncIterator]) iterator = iterable[Symbol.asyncIterator]();else if (iterable && iterable[Symbol.iterator]) iterator = iterable[Symbol.iterator]();else throw new ERR_INVALID_ARG_TYPE('iterable', ['Iterable'], iterable);
|
||
|
|
||
|
var readable = new Readable(_objectSpread({
|
||
|
objectMode: true
|
||
|
}, opts)); // Reading boolean to protect against _read
|
||
|
// being called before last iteration completion.
|
||
|
|
||
|
var reading = false;
|
||
|
|
||
|
readable._read = function () {
|
||
|
if (!reading) {
|
||
|
reading = true;
|
||
|
next();
|
||
|
}
|
||
|
};
|
||
|
|
||
|
function next() {
|
||
|
return _next2.apply(this, arguments);
|
||
|
}
|
||
|
|
||
|
function _next2() {
|
||
|
_next2 = _asyncToGenerator(function* () {
|
||
|
try {
|
||
|
var _ref = yield iterator.next(),
|
||
|
value = _ref.value,
|
||
|
done = _ref.done;
|
||
|
|
||
|
if (done) {
|
||
|
readable.push(null);
|
||
|
} else if (readable.push((yield value))) {
|
||
|
next();
|
||
|
} else {
|
||
|
reading = false;
|
||
|
}
|
||
|
} catch (err) {
|
||
|
readable.destroy(err);
|
||
|
}
|
||
|
});
|
||
|
return _next2.apply(this, arguments);
|
||
|
}
|
||
|
|
||
|
return readable;
|
||
|
}
|
||
|
|
||
|
module.exports = from;
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 59946:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Ported from https://github.com/mafintosh/pump with
|
||
|
// permission from the author, Mathias Buus (@mafintosh).
|
||
|
|
||
|
|
||
|
var eos;
|
||
|
|
||
|
function once(callback) {
|
||
|
var called = false;
|
||
|
return function () {
|
||
|
if (called) return;
|
||
|
called = true;
|
||
|
callback.apply(void 0, arguments);
|
||
|
};
|
||
|
}
|
||
|
|
||
|
var _require$codes = __webpack_require__(4012)/* .codes */ .q,
|
||
|
ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS,
|
||
|
ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED;
|
||
|
|
||
|
function noop(err) {
|
||
|
// Rethrow the error if it exists to avoid swallowing it
|
||
|
if (err) throw err;
|
||
|
}
|
||
|
|
||
|
function isRequest(stream) {
|
||
|
return stream.setHeader && typeof stream.abort === 'function';
|
||
|
}
|
||
|
|
||
|
function destroyer(stream, reading, writing, callback) {
|
||
|
callback = once(callback);
|
||
|
var closed = false;
|
||
|
stream.on('close', function () {
|
||
|
closed = true;
|
||
|
});
|
||
|
if (eos === undefined) eos = __webpack_require__(8610);
|
||
|
eos(stream, {
|
||
|
readable: reading,
|
||
|
writable: writing
|
||
|
}, function (err) {
|
||
|
if (err) return callback(err);
|
||
|
closed = true;
|
||
|
callback();
|
||
|
});
|
||
|
var destroyed = false;
|
||
|
return function (err) {
|
||
|
if (closed) return;
|
||
|
if (destroyed) return;
|
||
|
destroyed = true; // request.destroy just do .end - .abort is what we want
|
||
|
|
||
|
if (isRequest(stream)) return stream.abort();
|
||
|
if (typeof stream.destroy === 'function') return stream.destroy();
|
||
|
callback(err || new ERR_STREAM_DESTROYED('pipe'));
|
||
|
};
|
||
|
}
|
||
|
|
||
|
function call(fn) {
|
||
|
fn();
|
||
|
}
|
||
|
|
||
|
function pipe(from, to) {
|
||
|
return from.pipe(to);
|
||
|
}
|
||
|
|
||
|
function popCallback(streams) {
|
||
|
if (!streams.length) return noop;
|
||
|
if (typeof streams[streams.length - 1] !== 'function') return noop;
|
||
|
return streams.pop();
|
||
|
}
|
||
|
|
||
|
function pipeline() {
|
||
|
for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) {
|
||
|
streams[_key] = arguments[_key];
|
||
|
}
|
||
|
|
||
|
var callback = popCallback(streams);
|
||
|
if (Array.isArray(streams[0])) streams = streams[0];
|
||
|
|
||
|
if (streams.length < 2) {
|
||
|
throw new ERR_MISSING_ARGS('streams');
|
||
|
}
|
||
|
|
||
|
var error;
|
||
|
var destroys = streams.map(function (stream, i) {
|
||
|
var reading = i < streams.length - 1;
|
||
|
var writing = i > 0;
|
||
|
return destroyer(stream, reading, writing, function (err) {
|
||
|
if (!error) error = err;
|
||
|
if (err) destroys.forEach(call);
|
||
|
if (reading) return;
|
||
|
destroys.forEach(call);
|
||
|
callback(error);
|
||
|
});
|
||
|
});
|
||
|
return streams.reduce(pipe);
|
||
|
}
|
||
|
|
||
|
module.exports = pipeline;
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 82457:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
|
||
|
|
||
|
var ERR_INVALID_OPT_VALUE = __webpack_require__(4012)/* .codes.ERR_INVALID_OPT_VALUE */ .q.ERR_INVALID_OPT_VALUE;
|
||
|
|
||
|
function highWaterMarkFrom(options, isDuplex, duplexKey) {
|
||
|
return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null;
|
||
|
}
|
||
|
|
||
|
function getHighWaterMark(state, options, duplexKey, isDuplex) {
|
||
|
var hwm = highWaterMarkFrom(options, isDuplex, duplexKey);
|
||
|
|
||
|
if (hwm != null) {
|
||
|
if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) {
|
||
|
var name = isDuplex ? duplexKey : 'highWaterMark';
|
||
|
throw new ERR_INVALID_OPT_VALUE(name, hwm);
|
||
|
}
|
||
|
|
||
|
return Math.floor(hwm);
|
||
|
} // Default value
|
||
|
|
||
|
|
||
|
return state.objectMode ? 16 : 16 * 1024;
|
||
|
}
|
||
|
|
||
|
module.exports = {
|
||
|
getHighWaterMark: getHighWaterMark
|
||
|
};
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 79740:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
module.exports = __webpack_require__(92413);
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 11451:
|
||
|
/***/ ((module, exports, __webpack_require__) => {
|
||
|
|
||
|
var Stream = __webpack_require__(92413);
|
||
|
if (process.env.READABLE_STREAM === 'disable' && Stream) {
|
||
|
module.exports = Stream.Readable;
|
||
|
Object.assign(module.exports, Stream);
|
||
|
module.exports.Stream = Stream;
|
||
|
} else {
|
||
|
exports = module.exports = __webpack_require__(79481);
|
||
|
exports.Stream = Stream || exports;
|
||
|
exports.Readable = exports;
|
||
|
exports.Writable = __webpack_require__(64229);
|
||
|
exports.Duplex = __webpack_require__(56753);
|
||
|
exports.Transform = __webpack_require__(74605);
|
||
|
exports.PassThrough = __webpack_require__(82725);
|
||
|
exports.finished = __webpack_require__(8610);
|
||
|
exports.pipeline = __webpack_require__(59946);
|
||
|
}
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 32553:
|
||
|
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
|
||
|
|
||
|
"use strict";
|
||
|
// Copyright Joyent, Inc. and other Node contributors.
|
||
|
//
|
||
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
||
|
// copy of this software and associated documentation files (the
|
||
|
// "Software"), to deal in the Software without restriction, including
|
||
|
// without limitation the rights to use, copy, modify, merge, publish,
|
||
|
// distribute, sublicense, and/or sell copies of the Software, and to permit
|
||
|
// persons to whom the Software is furnished to do so, subject to the
|
||
|
// following conditions:
|
||
|
//
|
||
|
// The above copyright notice and this permission notice shall be included
|
||
|
// in all copies or substantial portions of the Software.
|
||
|
//
|
||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||
|
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
|
||
|
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||
|
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||
|
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||
|
// USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||
|
|
||
|
|
||
|
|
||
|
/*<replacement>*/
|
||
|
|
||
|
var Buffer = __webpack_require__(40396).Buffer;
|
||
|
/*</replacement>*/
|
||
|
|
||
|
var isEncoding = Buffer.isEncoding || function (encoding) {
|
||
|
encoding = '' + encoding;
|
||
|
switch (encoding && encoding.toLowerCase()) {
|
||
|
case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw':
|
||
|
return true;
|
||
|
default:
|
||
|
return false;
|
||
|
}
|
||
|
};
|
||
|
|
||
|
function _normalizeEncoding(enc) {
|
||
|
if (!enc) return 'utf8';
|
||
|
var retried;
|
||
|
while (true) {
|
||
|
switch (enc) {
|
||
|
case 'utf8':
|
||
|
case 'utf-8':
|
||
|
return 'utf8';
|
||
|
case 'ucs2':
|
||
|
case 'ucs-2':
|
||
|
case 'utf16le':
|
||
|
case 'utf-16le':
|
||
|
return 'utf16le';
|
||
|
case 'latin1':
|
||
|
case 'binary':
|
||
|
return 'latin1';
|
||
|
case 'base64':
|
||
|
case 'ascii':
|
||
|
case 'hex':
|
||
|
return enc;
|
||
|
default:
|
||
|
if (retried) return; // undefined
|
||
|
enc = ('' + enc).toLowerCase();
|
||
|
retried = true;
|
||
|
}
|
||
|
}
|
||
|
};
|
||
|
|
||
|
// Do not cache `Buffer.isEncoding` when checking encoding names as some
|
||
|
// modules monkey-patch it to support additional encodings
|
||
|
function normalizeEncoding(enc) {
|
||
|
var nenc = _normalizeEncoding(enc);
|
||
|
if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc);
|
||
|
return nenc || enc;
|
||
|
}
|
||
|
|
||
|
// StringDecoder provides an interface for efficiently splitting a series of
|
||
|
// buffers into a series of JS strings without breaking apart multi-byte
|
||
|
// characters.
|
||
|
exports.s = StringDecoder;
|
||
|
function StringDecoder(encoding) {
|
||
|
this.encoding = normalizeEncoding(encoding);
|
||
|
var nb;
|
||
|
switch (this.encoding) {
|
||
|
case 'utf16le':
|
||
|
this.text = utf16Text;
|
||
|
this.end = utf16End;
|
||
|
nb = 4;
|
||
|
break;
|
||
|
case 'utf8':
|
||
|
this.fillLast = utf8FillLast;
|
||
|
nb = 4;
|
||
|
break;
|
||
|
case 'base64':
|
||
|
this.text = base64Text;
|
||
|
this.end = base64End;
|
||
|
nb = 3;
|
||
|
break;
|
||
|
default:
|
||
|
this.write = simpleWrite;
|
||
|
this.end = simpleEnd;
|
||
|
return;
|
||
|
}
|
||
|
this.lastNeed = 0;
|
||
|
this.lastTotal = 0;
|
||
|
this.lastChar = Buffer.allocUnsafe(nb);
|
||
|
}
|
||
|
|
||
|
StringDecoder.prototype.write = function (buf) {
|
||
|
if (buf.length === 0) return '';
|
||
|
var r;
|
||
|
var i;
|
||
|
if (this.lastNeed) {
|
||
|
r = this.fillLast(buf);
|
||
|
if (r === undefined) return '';
|
||
|
i = this.lastNeed;
|
||
|
this.lastNeed = 0;
|
||
|
} else {
|
||
|
i = 0;
|
||
|
}
|
||
|
if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i);
|
||
|
return r || '';
|
||
|
};
|
||
|
|
||
|
StringDecoder.prototype.end = utf8End;
|
||
|
|
||
|
// Returns only complete characters in a Buffer
|
||
|
StringDecoder.prototype.text = utf8Text;
|
||
|
|
||
|
// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer
|
||
|
StringDecoder.prototype.fillLast = function (buf) {
|
||
|
if (this.lastNeed <= buf.length) {
|
||
|
buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed);
|
||
|
return this.lastChar.toString(this.encoding, 0, this.lastTotal);
|
||
|
}
|
||
|
buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length);
|
||
|
this.lastNeed -= buf.length;
|
||
|
};
|
||
|
|
||
|
// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a
|
||
|
// continuation byte. If an invalid byte is detected, -2 is returned.
|
||
|
function utf8CheckByte(byte) {
|
||
|
if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4;
|
||
|
return byte >> 6 === 0x02 ? -1 : -2;
|
||
|
}
|
||
|
|
||
|
// Checks at most 3 bytes at the end of a Buffer in order to detect an
|
||
|
// incomplete multi-byte UTF-8 character. The total number of bytes (2, 3, or 4)
|
||
|
// needed to complete the UTF-8 character (if applicable) are returned.
|
||
|
function utf8CheckIncomplete(self, buf, i) {
|
||
|
var j = buf.length - 1;
|
||
|
if (j < i) return 0;
|
||
|
var nb = utf8CheckByte(buf[j]);
|
||
|
if (nb >= 0) {
|
||
|
if (nb > 0) self.lastNeed = nb - 1;
|
||
|
return nb;
|
||
|
}
|
||
|
if (--j < i || nb === -2) return 0;
|
||
|
nb = utf8CheckByte(buf[j]);
|
||
|
if (nb >= 0) {
|
||
|
if (nb > 0) self.lastNeed = nb - 2;
|
||
|
return nb;
|
||
|
}
|
||
|
if (--j < i || nb === -2) return 0;
|
||
|
nb = utf8CheckByte(buf[j]);
|
||
|
if (nb >= 0) {
|
||
|
if (nb > 0) {
|
||
|
if (nb === 2) nb = 0;else self.lastNeed = nb - 3;
|
||
|
}
|
||
|
return nb;
|
||
|
}
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
// Validates as many continuation bytes for a multi-byte UTF-8 character as
|
||
|
// needed or are available. If we see a non-continuation byte where we expect
|
||
|
// one, we "replace" the validated continuation bytes we've seen so far with
|
||
|
// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding
|
||
|
// behavior. The continuation byte check is included three times in the case
|
||
|
// where all of the continuation bytes for a character exist in the same buffer.
|
||
|
// It is also done this way as a slight performance increase instead of using a
|
||
|
// loop.
|
||
|
function utf8CheckExtraBytes(self, buf, p) {
|
||
|
if ((buf[0] & 0xC0) !== 0x80) {
|
||
|
self.lastNeed = 0;
|
||
|
return '\ufffd';
|
||
|
}
|
||
|
if (self.lastNeed > 1 && buf.length > 1) {
|
||
|
if ((buf[1] & 0xC0) !== 0x80) {
|
||
|
self.lastNeed = 1;
|
||
|
return '\ufffd';
|
||
|
}
|
||
|
if (self.lastNeed > 2 && buf.length > 2) {
|
||
|
if ((buf[2] & 0xC0) !== 0x80) {
|
||
|
self.lastNeed = 2;
|
||
|
return '\ufffd';
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
|
||
|
// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer.
|
||
|
function utf8FillLast(buf) {
|
||
|
var p = this.lastTotal - this.lastNeed;
|
||
|
var r = utf8CheckExtraBytes(this, buf, p);
|
||
|
if (r !== undefined) return r;
|
||
|
if (this.lastNeed <= buf.length) {
|
||
|
buf.copy(this.lastChar, p, 0, this.lastNeed);
|
||
|
return this.lastChar.toString(this.encoding, 0, this.lastTotal);
|
||
|
}
|
||
|
buf.copy(this.lastChar, p, 0, buf.length);
|
||
|
this.lastNeed -= buf.length;
|
||
|
}
|
||
|
|
||
|
// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a
|
||
|
// partial character, the character's bytes are buffered until the required
|
||
|
// number of bytes are available.
|
||
|
function utf8Text(buf, i) {
|
||
|
var total = utf8CheckIncomplete(this, buf, i);
|
||
|
if (!this.lastNeed) return buf.toString('utf8', i);
|
||
|
this.lastTotal = total;
|
||
|
var end = buf.length - (total - this.lastNeed);
|
||
|
buf.copy(this.lastChar, 0, end);
|
||
|
return buf.toString('utf8', i, end);
|
||
|
}
|
||
|
|
||
|
// For UTF-8, a replacement character is added when ending on a partial
|
||
|
// character.
|
||
|
function utf8End(buf) {
|
||
|
var r = buf && buf.length ? this.write(buf) : '';
|
||
|
if (this.lastNeed) return r + '\ufffd';
|
||
|
return r;
|
||
|
}
|
||
|
|
||
|
// UTF-16LE typically needs two bytes per character, but even if we have an even
|
||
|
// number of bytes available, we need to check if we end on a leading/high
|
||
|
// surrogate. In that case, we need to wait for the next two bytes in order to
|
||
|
// decode the last character properly.
|
||
|
function utf16Text(buf, i) {
|
||
|
if ((buf.length - i) % 2 === 0) {
|
||
|
var r = buf.toString('utf16le', i);
|
||
|
if (r) {
|
||
|
var c = r.charCodeAt(r.length - 1);
|
||
|
if (c >= 0xD800 && c <= 0xDBFF) {
|
||
|
this.lastNeed = 2;
|
||
|
this.lastTotal = 4;
|
||
|
this.lastChar[0] = buf[buf.length - 2];
|
||
|
this.lastChar[1] = buf[buf.length - 1];
|
||
|
return r.slice(0, -1);
|
||
|
}
|
||
|
}
|
||
|
return r;
|
||
|
}
|
||
|
this.lastNeed = 1;
|
||
|
this.lastTotal = 2;
|
||
|
this.lastChar[0] = buf[buf.length - 1];
|
||
|
return buf.toString('utf16le', i, buf.length - 1);
|
||
|
}
|
||
|
|
||
|
// For UTF-16LE we do not explicitly append special replacement characters if we
|
||
|
// end on a partial character, we simply let v8 handle that.
|
||
|
function utf16End(buf) {
|
||
|
var r = buf && buf.length ? this.write(buf) : '';
|
||
|
if (this.lastNeed) {
|
||
|
var end = this.lastTotal - this.lastNeed;
|
||
|
return r + this.lastChar.toString('utf16le', 0, end);
|
||
|
}
|
||
|
return r;
|
||
|
}
|
||
|
|
||
|
function base64Text(buf, i) {
|
||
|
var n = (buf.length - i) % 3;
|
||
|
if (n === 0) return buf.toString('base64', i);
|
||
|
this.lastNeed = 3 - n;
|
||
|
this.lastTotal = 3;
|
||
|
if (n === 1) {
|
||
|
this.lastChar[0] = buf[buf.length - 1];
|
||
|
} else {
|
||
|
this.lastChar[0] = buf[buf.length - 2];
|
||
|
this.lastChar[1] = buf[buf.length - 1];
|
||
|
}
|
||
|
return buf.toString('base64', i, buf.length - n);
|
||
|
}
|
||
|
|
||
|
function base64End(buf) {
|
||
|
var r = buf && buf.length ? this.write(buf) : '';
|
||
|
if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed);
|
||
|
return r;
|
||
|
}
|
||
|
|
||
|
// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex)
|
||
|
function simpleWrite(buf) {
|
||
|
return buf.toString(this.encoding);
|
||
|
}
|
||
|
|
||
|
function simpleEnd(buf) {
|
||
|
return buf && buf.length ? this.write(buf) : '';
|
||
|
}
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 40396:
|
||
|
/***/ ((module, exports, __webpack_require__) => {
|
||
|
|
||
|
/*! safe-buffer. MIT License. Feross Aboukhadijeh <https://feross.org/opensource> */
|
||
|
/* eslint-disable node/no-deprecated-api */
|
||
|
var buffer = __webpack_require__(64293)
|
||
|
var Buffer = buffer.Buffer
|
||
|
|
||
|
// alternative to using Object.keys for old browsers
|
||
|
function copyProps (src, dst) {
|
||
|
for (var key in src) {
|
||
|
dst[key] = src[key]
|
||
|
}
|
||
|
}
|
||
|
if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) {
|
||
|
module.exports = buffer
|
||
|
} else {
|
||
|
// Copy properties from require('buffer')
|
||
|
copyProps(buffer, exports)
|
||
|
exports.Buffer = SafeBuffer
|
||
|
}
|
||
|
|
||
|
function SafeBuffer (arg, encodingOrOffset, length) {
|
||
|
return Buffer(arg, encodingOrOffset, length)
|
||
|
}
|
||
|
|
||
|
SafeBuffer.prototype = Object.create(Buffer.prototype)
|
||
|
|
||
|
// Copy static methods from Buffer
|
||
|
copyProps(Buffer, SafeBuffer)
|
||
|
|
||
|
SafeBuffer.from = function (arg, encodingOrOffset, length) {
|
||
|
if (typeof arg === 'number') {
|
||
|
throw new TypeError('Argument must not be a number')
|
||
|
}
|
||
|
return Buffer(arg, encodingOrOffset, length)
|
||
|
}
|
||
|
|
||
|
SafeBuffer.alloc = function (size, fill, encoding) {
|
||
|
if (typeof size !== 'number') {
|
||
|
throw new TypeError('Argument must be a number')
|
||
|
}
|
||
|
var buf = Buffer(size)
|
||
|
if (fill !== undefined) {
|
||
|
if (typeof encoding === 'string') {
|
||
|
buf.fill(fill, encoding)
|
||
|
} else {
|
||
|
buf.fill(fill)
|
||
|
}
|
||
|
} else {
|
||
|
buf.fill(0)
|
||
|
}
|
||
|
return buf
|
||
|
}
|
||
|
|
||
|
SafeBuffer.allocUnsafe = function (size) {
|
||
|
if (typeof size !== 'number') {
|
||
|
throw new TypeError('Argument must be a number')
|
||
|
}
|
||
|
return Buffer(size)
|
||
|
}
|
||
|
|
||
|
SafeBuffer.allocUnsafeSlow = function (size) {
|
||
|
if (typeof size !== 'number') {
|
||
|
throw new TypeError('Argument must be a number')
|
||
|
}
|
||
|
return buffer.SlowBuffer(size)
|
||
|
}
|
||
|
|
||
|
|
||
|
/***/ }),
|
||
|
|
||
|
/***/ 41159:
|
||
|
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
|
||
|
|
||
|
|
||
|
/**
|
||
|
* For Node.js, simply re-export the core `util.deprecate` function.
|
||
|
*/
|
||
|
|
||
|
module.exports = __webpack_require__(31669).deprecate;
|
||
|
|
||
|
|
||
|
/***/ })
|
||
|
|
||
|
};
|
||
|
;
|
||
|
//# sourceMappingURL=905.index.js.map
|