UNPKG

32.1 kBJavaScriptView Raw
1"use strict";
2/*!
3 * Copyright 2018 Google LLC
4 *
5 * Use of this source code is governed by an MIT-style
6 * license that can be found in the LICENSE file or at
7 * https://opensource.org/licenses/MIT.
8 */
9Object.defineProperty(exports, "__esModule", { value: true });
10exports.createURI = exports.upload = exports.Upload = exports.PROTOCOL_REGEX = void 0;
11const abort_controller_1 = require("abort-controller");
12const ConfigStore = require("configstore");
13const crypto_1 = require("crypto");
14const extend = require("extend");
15const gaxios = require("gaxios");
16const google_auth_library_1 = require("google-auth-library");
17const Pumpify = require("pumpify");
18const stream_1 = require("stream");
19const streamEvents = require("stream-events");
20const retry = require("async-retry");
// Status returned when the resumable session (or object) cannot be found.
const NOT_FOUND_STATUS_CODE = 404;
// Status returned when a resumable upload session has been cancelled/expired.
const TERMINATED_UPLOAD_STATUS_CODE = 410;
// Non-standard status GCS uses to acknowledge a partial (incomplete) upload.
const RESUMABLE_INCOMPLETE_STATUS_CODE = 308;
// Default maximum number of retry attempts per failed request.
const RETRY_LIMIT = 5;
// Matches Google-hosted API endpoints; custom endpoints skip authentication.
const DEFAULT_API_ENDPOINT_REGEX = /.*\.googleapis\.com/;
// Default cap on a single retry back-off delay, in seconds.
const MAX_RETRY_DELAY = 64;
// Default exponential back-off multiplier between retries.
const RETRY_DELAY_MULTIPLIER = 2;
// Default total time budget for all retries of a request, in seconds.
const MAX_TOTAL_RETRY_TIMEOUT = 600;
// Retries are enabled by default unless `retryOptions.autoRetry` disables them.
const AUTO_RETRY_VALUE = true;
// Captures the scheme of a URL (e.g. `https`) to detect protocol-less input.
exports.PROTOCOL_REGEX = /^(\w*):\/\//;
/**
 * A duplex stream implementing GCS resumable uploads.
 *
 * Data written by the caller is accumulated in `upstreamChunkBuffer` and sent
 * to the resumable session URI via one or more PUT requests (one request per
 * chunk when `chunkSize` is set, otherwise a single streaming request).
 * Session state (`uri`, `firstChunk`) is persisted through ConfigStore so an
 * interrupted upload can be resumed later.
 */
class Upload extends Pumpify {
    /**
     * @param cfg Upload configuration; `bucket` and `file` are required.
     * @throws {Error} If `bucket` or `file` is missing.
     */
    constructor(cfg) {
        var _a, _b, _c, _d, _e, _f;
        super();
        this.numBytesWritten = 0;
        this.numRetries = 0;
        this.retryLimit = RETRY_LIMIT;
        this.maxRetryDelay = MAX_RETRY_DELAY;
        this.retryDelayMultiplier = RETRY_DELAY_MULTIPLIER;
        this.maxRetryTotalTimeout = MAX_TOTAL_RETRY_TIMEOUT;
        // Caller-written data that has not yet been pulled into a request.
        this.upstreamChunkBuffer = Buffer.alloc(0);
        this.chunkBufferEncoding = undefined;
        this.numChunksReadInRequest = 0;
        /**
         * A chunk used for caching the most recent upload chunk.
         * We should not assume that the server received all bytes sent in the request.
         * - https://cloud.google.com/storage/docs/performing-resumable-uploads#chunked-upload
         */
        this.lastChunkSent = Buffer.alloc(0);
        this.upstreamEnded = false;
        /** A stream representing the incoming data to upload */
        this.upstream = new stream_1.Duplex({
            read: async () => {
                this.once('prepareFinish', () => {
                    // Allows this (`Upload`) to finish/end once the upload has succeeded.
                    this.upstream.push(null);
                });
            },
            write: this.writeToChunkBuffer.bind(this),
        });
        streamEvents(this);
        cfg = cfg || {};
        if (!cfg.bucket || !cfg.file) {
            throw new Error('A bucket and file name are required');
        }
        cfg.authConfig = cfg.authConfig || {};
        // Full-control scope is required to create/overwrite objects and ACLs.
        cfg.authConfig.scopes = [
            'https://www.googleapis.com/auth/devstorage.full_control',
        ];
        this.authClient = cfg.authClient || new google_auth_library_1.GoogleAuth(cfg.authConfig);
        this.apiEndpoint = 'https://storage.googleapis.com';
        if (cfg.apiEndpoint) {
            this.apiEndpoint = this.sanitizeEndpoint(cfg.apiEndpoint);
            if (!DEFAULT_API_ENDPOINT_REGEX.test(cfg.apiEndpoint)) {
                // Custom (non-googleapis.com) endpoints skip authentication:
                // plain gaxios is substituted as an unauthenticated request client.
                this.authClient = gaxios;
            }
        }
        this.baseURI = `${this.apiEndpoint}/upload/storage/v1/b`;
        this.bucket = cfg.bucket;
        // The ConfigStore key includes the generation (when provided) so
        // uploads of distinct generations don't collide in cached state.
        const cacheKeyElements = [cfg.bucket, cfg.file];
        if (typeof cfg.generation === 'number') {
            cacheKeyElements.push(`${cfg.generation}`);
        }
        this.cacheKey = cacheKeyElements.join('/');
        this.customRequestOptions = cfg.customRequestOptions || {};
        this.file = cfg.file;
        this.generation = cfg.generation;
        this.kmsKeyName = cfg.kmsKeyName;
        this.metadata = cfg.metadata || {};
        this.offset = cfg.offset;
        this.origin = cfg.origin;
        this.params = cfg.params || {};
        this.userProject = cfg.userProject;
        this.chunkSize = cfg.chunkSize;
        if (cfg.key) {
            /**
             * NOTE: This is `as string` because there appears to be some weird kind
             * of TypeScript bug as 2.8. Tracking the issue here:
             * https://github.com/Microsoft/TypeScript/issues/23155
             */
            const base64Key = Buffer.from(cfg.key).toString('base64');
            // Customer-supplied encryption key headers are derived from this pair.
            this.encryption = {
                key: base64Key,
                hash: (0, crypto_1.createHash)('sha256').update(cfg.key).digest('base64'),
            };
        }
        // `private`/`public` shorthands override any explicit predefinedAcl.
        this.predefinedAcl = cfg.predefinedAcl;
        if (cfg.private)
            this.predefinedAcl = 'private';
        if (cfg.public)
            this.predefinedAcl = 'publicRead';
        const configPath = cfg.configPath;
        this.configStore = new ConfigStore('gcs-resumable-upload', null, {
            configPath,
        });
        const autoRetry = ((_a = cfg === null || cfg === void 0 ? void 0 : cfg.retryOptions) === null || _a === void 0 ? void 0 : _a.autoRetry) || AUTO_RETRY_VALUE;
        // Remember whether the caller supplied the URI: a 404 on a
        // caller-provided URI is surfaced rather than treated as stale config.
        this.uriProvidedManually = !!cfg.uri;
        this.uri = cfg.uri || this.get('uri');
        this.numBytesWritten = 0;
        this.numRetries = 0; //counter for number of retries currently executed
        if (autoRetry && ((_b = cfg === null || cfg === void 0 ? void 0 : cfg.retryOptions) === null || _b === void 0 ? void 0 : _b.maxRetries) !== undefined) {
            this.retryLimit = cfg.retryOptions.maxRetries;
        }
        else if (!autoRetry) {
            this.retryLimit = 0;
        }
        if (((_c = cfg === null || cfg === void 0 ? void 0 : cfg.retryOptions) === null || _c === void 0 ? void 0 : _c.maxRetryDelay) !== undefined) {
            this.maxRetryDelay = cfg.retryOptions.maxRetryDelay;
        }
        if (((_d = cfg === null || cfg === void 0 ? void 0 : cfg.retryOptions) === null || _d === void 0 ? void 0 : _d.retryDelayMultiplier) !== undefined) {
            this.retryDelayMultiplier = cfg.retryOptions.retryDelayMultiplier;
        }
        if (((_e = cfg === null || cfg === void 0 ? void 0 : cfg.retryOptions) === null || _e === void 0 ? void 0 : _e.totalTimeout) !== undefined) {
            this.maxRetryTotalTimeout = cfg.retryOptions.totalTimeout;
        }
        this.timeOfFirstRequest = Date.now();
        this.retryableErrorFn = (_f = cfg === null || cfg === void 0 ? void 0 : cfg.retryOptions) === null || _f === void 0 ? void 0 : _f.retryableErrorFn;
        // '*' tells the API the total size is unknown (open-ended upload).
        const contentLength = cfg.metadata
            ? Number(cfg.metadata.contentLength)
            : NaN;
        this.contentLength = isNaN(contentLength) ? '*' : contentLength;
        this.upstream.on('end', () => {
            this.upstreamEnded = true;
        });
        this.on('prefinish', () => {
            this.upstreamEnded = true;
        });
        this.once('writing', () => {
            // Now that someone is writing to this object, let's attach
            // some duplexes. These duplexes enable this object to be
            // better managed in terms of 'end'/'finish' control and
            // buffering writes downstream if someone enables multi-
            // chunk upload support (`chunkSize`) w/o adding too much into
            // memory.
            this.setPipeline(this.upstream, new stream_1.PassThrough());
            if (this.uri) {
                this.continueUploading();
            }
            else {
                this.createURI((err, uri) => {
                    if (err) {
                        return this.destroy(err);
                    }
                    this.set({ uri });
                    this.startUploading();
                    return;
                });
            }
        });
    }
    /**
     * A handler for `upstream` to write and buffer its data.
     *
     * @param chunk The chunk to append to the buffer
     * @param encoding The encoding of the chunk
     * @param readCallback A callback for when the buffer has been read downstream
     */
    writeToChunkBuffer(chunk, encoding, readCallback) {
        this.upstreamChunkBuffer = Buffer.concat([
            this.upstreamChunkBuffer,
            typeof chunk === 'string' ? Buffer.from(chunk, encoding) : chunk,
        ]);
        this.chunkBufferEncoding = encoding;
        // Back-pressure: only acknowledge this write once the buffered data
        // has actually been consumed by a request.
        this.once('readFromChunkBuffer', readCallback);
        process.nextTick(() => this.emit('wroteToChunkBuffer'));
    }
    /**
     * Prepends data back to the upstream chunk buffer.
     *
     * @param chunk The data to prepend
     */
    unshiftChunkBuffer(chunk) {
        this.upstreamChunkBuffer = Buffer.concat([chunk, this.upstreamChunkBuffer]);
    }
    /**
     * Retrieves data from upstream's buffer.
     *
     * @param limit The maximum amount to return from the buffer.
     * @returns The data requested.
     */
    pullFromChunkBuffer(limit) {
        const chunk = this.upstreamChunkBuffer.slice(0, limit);
        this.upstreamChunkBuffer = this.upstreamChunkBuffer.slice(limit);
        // notify upstream we've read from the buffer so it can potentially
        // send more data down.
        process.nextTick(() => this.emit('readFromChunkBuffer'));
        return chunk;
    }
    /**
     * A handler for determining if data is ready to be read from upstream.
     *
     * @returns If there will be more chunks to read in the future
     */
    async waitForNextChunk() {
        const willBeMoreChunks = await new Promise(resolve => {
            // There's data available - it should be digested
            if (this.upstreamChunkBuffer.byteLength) {
                return resolve(true);
            }
            // The upstream writable ended, we shouldn't expect any more data.
            if (this.upstreamEnded) {
                return resolve(false);
            }
            // Nothing immediate seems to be determined. We need to prepare some
            // listeners to determine next steps...
            const wroteToChunkBufferCallback = () => {
                removeListeners();
                return resolve(true);
            };
            const upstreamFinishedCallback = () => {
                removeListeners();
                // this should be the last chunk, if there's anything there
                if (this.upstreamChunkBuffer.length)
                    return resolve(true);
                return resolve(false);
            };
            // Remove listeners when we're ready to callback.
            // It's important to clean-up listeners as Node has a default max number of
            // event listeners. Notably, The number of requests can be greater than the
            // number of potential listeners.
            // - https://nodejs.org/api/events.html#eventsdefaultmaxlisteners
            const removeListeners = () => {
                this.removeListener('wroteToChunkBuffer', wroteToChunkBufferCallback);
                this.upstream.removeListener('finish', upstreamFinishedCallback);
                this.removeListener('prefinish', upstreamFinishedCallback);
            };
            // If there's data recently written it should be digested
            this.once('wroteToChunkBuffer', wroteToChunkBufferCallback);
            // If the upstream finishes let's see if there's anything to grab
            this.upstream.once('finish', upstreamFinishedCallback);
            this.once('prefinish', upstreamFinishedCallback);
        });
        return willBeMoreChunks;
    }
    /**
     * Reads data from upstream up to the provided `limit`.
     * Ends when the limit has reached or no data is expected to be pushed from upstream.
     *
     * @param limit The most amount of data this iterator should return. `Infinity` by default.
     * @param oneChunkMode Determines if one, exhaustive chunk is yielded for the iterator
     */
    async *upstreamIterator(limit = Infinity, oneChunkMode) {
        let completeChunk = Buffer.alloc(0);
        // read from upstream chunk buffer
        while (limit && (await this.waitForNextChunk())) {
            // read until end or limit has been reached
            const chunk = this.pullFromChunkBuffer(limit);
            limit -= chunk.byteLength;
            if (oneChunkMode) {
                // return 1 chunk at the end of iteration
                completeChunk = Buffer.concat([completeChunk, chunk]);
            }
            else {
                // return many chunks throughout iteration
                yield {
                    chunk,
                    encoding: this.chunkBufferEncoding,
                };
            }
        }
        if (oneChunkMode) {
            yield {
                chunk: completeChunk,
                encoding: this.chunkBufferEncoding,
            };
        }
    }
    /**
     * Creates the resumable-upload session URI.
     *
     * @param callback Optional node-style callback; when omitted, a Promise
     *     resolving to the URI is returned instead.
     */
    createURI(callback) {
        if (!callback) {
            return this.createURIAsync();
        }
        this.createURIAsync().then(r => callback(null, r), callback);
    }
    /**
     * POSTs to the upload endpoint to open a resumable session, retrying
     * per the configured retry options, and caches the resulting URI.
     *
     * @returns The session URI from the response `Location` header.
     */
    async createURIAsync() {
        const metadata = this.metadata;
        const reqOpts = {
            method: 'POST',
            url: [this.baseURI, this.bucket, 'o'].join('/'),
            params: Object.assign({
                name: this.file,
                uploadType: 'resumable',
            }, this.params),
            data: metadata,
            headers: {},
        };
        if (metadata.contentLength) {
            reqOpts.headers['X-Upload-Content-Length'] =
                metadata.contentLength.toString();
        }
        if (metadata.contentType) {
            reqOpts.headers['X-Upload-Content-Type'] = metadata.contentType;
        }
        if (typeof this.generation !== 'undefined') {
            reqOpts.params.ifGenerationMatch = this.generation;
        }
        if (this.kmsKeyName) {
            reqOpts.params.kmsKeyName = this.kmsKeyName;
        }
        if (this.predefinedAcl) {
            reqOpts.params.predefinedAcl = this.predefinedAcl;
        }
        if (this.origin) {
            reqOpts.headers.Origin = this.origin;
        }
        const uri = await retry(async (bail) => {
            var _a, _b, _c;
            try {
                const res = await this.makeRequest(reqOpts);
                return res.headers.location;
            }
            catch (err) {
                const e = err;
                // Shape the gaxios error like an API error for the user-supplied
                // retry predicate. NOTE(review): `message` reuses statusText —
                // confirm that's intentional rather than `e.message`.
                const apiError = {
                    code: (_a = e.response) === null || _a === void 0 ? void 0 : _a.status,
                    name: (_b = e.response) === null || _b === void 0 ? void 0 : _b.statusText,
                    message: (_c = e.response) === null || _c === void 0 ? void 0 : _c.statusText,
                    errors: [
                        {
                            reason: e.code,
                        },
                    ],
                };
                if (this.retryLimit > 0 &&
                    this.retryableErrorFn &&
                    this.retryableErrorFn(apiError)) {
                    // throwing lets `retry` schedule another attempt
                    throw e;
                }
                else {
                    // `bail` aborts retrying and rejects with this error
                    return bail(e);
                }
            }
        }, {
            retries: this.retryLimit,
            factor: this.retryDelayMultiplier,
            maxTimeout: this.maxRetryDelay * 1000, //convert to milliseconds
            maxRetryTime: this.maxRetryTotalTimeout * 1000, //convert to milliseconds
        });
        this.uri = uri;
        this.offset = 0;
        return uri;
    }
    /**
     * Resumes an upload against an existing session URI, fetching the
     * server-side offset first if one isn't already known.
     */
    async continueUploading() {
        if (typeof this.offset === 'number') {
            this.startUploading();
            return;
        }
        await this.getAndSetOffset();
        this.startUploading();
    }
    /**
     * Issues one PUT request uploading data from the upstream buffer —
     * the entire stream in single-request mode, or one chunk of
     * `chunkSize` bytes in multi-chunk mode.
     */
    async startUploading() {
        const multiChunkMode = !!this.chunkSize;
        let responseReceived = false;
        this.numChunksReadInRequest = 0;
        if (!this.offset) {
            this.offset = 0;
        }
        // Check if we're uploading the expected object
        if (this.numBytesWritten === 0) {
            const isSameObject = await this.ensureUploadingSameObject();
            if (!isSameObject) {
                // `ensureUploadingSameObject` will restart the upload.
                return;
            }
        }
        // Check if the offset (server) is too far behind the current stream
        if (this.offset < this.numBytesWritten) {
            this.emit('error', new RangeError('The offset is lower than the number of bytes written'));
            return;
        }
        // Check if we should 'fast-forward' to the relevant data to upload
        if (this.numBytesWritten < this.offset) {
            // 'fast-forward' to the byte where we need to upload.
            // only push data from the byte after the one we left off on
            const fastForwardBytes = this.offset - this.numBytesWritten;
            for await (const _chunk of this.upstreamIterator(fastForwardBytes)) {
                _chunk; // discard the data up until the point we want
            }
            this.numBytesWritten = this.offset;
        }
        let expectedUploadSize = undefined;
        // Set `expectedUploadSize` to `contentLength` if available
        if (typeof this.contentLength === 'number') {
            expectedUploadSize = this.contentLength - this.numBytesWritten;
        }
        // `expectedUploadSize` should be no more than the `chunkSize`.
        // It's possible this is the last chunk request for a multiple
        // chunk upload, thus smaller than the chunk size.
        if (this.chunkSize) {
            expectedUploadSize = expectedUploadSize
                ? Math.min(this.chunkSize, expectedUploadSize)
                : this.chunkSize;
        }
        // A queue for the upstream data
        const upstreamQueue = this.upstreamIterator(expectedUploadSize, multiChunkMode // multi-chunk mode should return 1 chunk per request
        );
        // The primary read stream for this request. This stream retrieves no more
        // than the exact requested amount from upstream.
        const requestStream = new stream_1.Readable({
            read: async () => {
                // Don't attempt to retrieve data upstream if we already have a response
                // NOTE(review): there is no `return` after this push, so the handler
                // still awaits the queue below — confirm this is intentional.
                if (responseReceived)
                    requestStream.push(null);
                const result = await upstreamQueue.next();
                if (result.value) {
                    this.numChunksReadInRequest++;
                    // Cache the chunk in case the server acknowledges only part of it.
                    this.lastChunkSent = result.value.chunk;
                    this.numBytesWritten += result.value.chunk.byteLength;
                    this.emit('progress', {
                        bytesWritten: this.numBytesWritten,
                        contentLength: this.contentLength,
                    });
                    requestStream.push(result.value.chunk, result.value.encoding);
                }
                if (result.done) {
                    requestStream.push(null);
                }
            },
        });
        let headers = {};
        // If using multiple chunk upload, set appropriate header
        if (multiChunkMode && expectedUploadSize) {
            // The '-1' is because the ending byte is inclusive in the request.
            const endingByte = expectedUploadSize + this.numBytesWritten - 1;
            headers = {
                'Content-Length': expectedUploadSize,
                'Content-Range': `bytes ${this.offset}-${endingByte}/${this.contentLength}`,
            };
        }
        else {
            // Open-ended range: total length may be unknown ('*').
            headers = {
                'Content-Range': `bytes ${this.offset}-*/${this.contentLength}`,
            };
        }
        const reqOpts = {
            method: 'PUT',
            url: this.uri,
            headers,
            body: requestStream,
        };
        try {
            const resp = await this.makeRequestStream(reqOpts);
            if (resp) {
                responseReceived = true;
                this.responseHandler(resp);
            }
        }
        catch (err) {
            const e = err;
            this.destroy(e);
        }
    }
    // Process the API response to look for errors that came in
    // the response body.
    responseHandler(resp) {
        if (resp.data.error) {
            this.destroy(resp.data.error);
            return;
        }
        // A 308 with a `range` header in multi-chunk mode means "chunk
        // accepted (possibly partially) — send the next one".
        const shouldContinueWithNextMultiChunkRequest = this.chunkSize &&
            resp.status === RESUMABLE_INCOMPLETE_STATUS_CODE &&
            resp.headers.range;
        if (shouldContinueWithNextMultiChunkRequest) {
            // Use the upper value in this header to determine where to start the next chunk.
            // We should not assume that the server received all bytes sent in the request.
            // https://cloud.google.com/storage/docs/performing-resumable-uploads#chunked-upload
            const range = resp.headers.range;
            this.offset = Number(range.split('-')[1]) + 1;
            // We should not assume that the server received all bytes sent in the request.
            // - https://cloud.google.com/storage/docs/performing-resumable-uploads#chunked-upload
            const missingBytes = this.numBytesWritten - this.offset;
            if (missingBytes) {
                const dataToPrependForResending = this.lastChunkSent.slice(-missingBytes);
                // As multi-chunk uploads send one chunk per request and pulls one
                // chunk into the pipeline, prepending the missing bytes back should
                // be fine for the next request.
                this.unshiftChunkBuffer(dataToPrependForResending);
                this.numBytesWritten -= missingBytes;
                this.lastChunkSent = Buffer.alloc(0);
            }
            // continue uploading next chunk
            this.continueUploading();
        }
        else if (!this.isSuccessfulResponse(resp.status)) {
            const err = {
                code: resp.status,
                name: 'Upload failed',
                message: 'Upload failed',
            };
            this.destroy(err);
        }
        else {
            // remove the last chunk sent
            this.lastChunkSent = Buffer.alloc(0);
            if (resp && resp.data) {
                resp.data.size = Number(resp.data.size);
            }
            this.emit('metadata', resp.data);
            this.deleteConfig();
            // Allow the object (Upload) to continue naturally so the user's
            // "finish" event fires.
            this.emit('prepareFinish');
        }
    }
    /**
     * Check if this is the same content uploaded previously. This caches a
     * slice of the first chunk, then compares it with the first byte of
     * incoming data.
     *
     * @returns if the request is ok to continue as-is
     */
    async ensureUploadingSameObject() {
        // A queue for the upstream data
        const upstreamQueue = this.upstreamIterator(16, true // we just want one chunk for this validation
        );
        const upstreamChunk = await upstreamQueue.next();
        const chunk = upstreamChunk.value
            ? upstreamChunk.value.chunk
            : Buffer.alloc(0);
        // Put the original chunk back into the buffer as we just wanted to 'peek'
        // at the stream for validation.
        this.unshiftChunkBuffer(chunk);
        let cachedFirstChunk = this.get('firstChunk');
        const firstChunk = chunk.valueOf();
        if (!cachedFirstChunk) {
            // This is a new upload. Cache the first chunk.
            this.set({ uri: this.uri, firstChunk });
        }
        else {
            // this continues an upload in progress. check if the bytes are the same
            cachedFirstChunk = Buffer.from(cachedFirstChunk);
            const nextChunk = Buffer.from(firstChunk);
            if (Buffer.compare(cachedFirstChunk, nextChunk) !== 0) {
                // this data is not the same. start a new upload
                this.restart();
                return false;
            }
        }
        return true;
    }
    /**
     * Queries the session URI for how many bytes the server has received
     * and stores the result in `this.offset`. Restarts the upload on stale
     * config (404 with a config-sourced URI) or a terminated session (410).
     */
    async getAndSetOffset() {
        // An empty PUT with 'bytes */*' asks the server to report upload status.
        const opts = {
            method: 'PUT',
            url: this.uri,
            headers: { 'Content-Length': 0, 'Content-Range': 'bytes */*' },
        };
        try {
            const resp = await this.makeRequest(opts);
            if (resp.status === RESUMABLE_INCOMPLETE_STATUS_CODE) {
                if (resp.headers.range) {
                    const range = resp.headers.range;
                    this.offset = Number(range.split('-')[1]) + 1;
                    return;
                }
            }
            this.offset = 0;
        }
        catch (e) {
            const err = e;
            const resp = err.response;
            // we don't return a 404 to the user if they provided the resumable
            // URI. if we're just using the configstore file to tell us that this
            // file exists, and it turns out that it doesn't (the 404), that's
            // probably stale config data.
            if (resp &&
                resp.status === NOT_FOUND_STATUS_CODE &&
                !this.uriProvidedManually) {
                this.restart();
                return;
            }
            // this resumable upload is unrecoverable (bad data or service error).
            // -
            // https://github.com/googleapis/gcs-resumable-upload/issues/15
            // -
            // https://github.com/googleapis/gcs-resumable-upload/pull/16#discussion_r80363774
            if (resp && resp.status === TERMINATED_UPLOAD_STATUS_CODE) {
                this.restart();
                return;
            }
            this.destroy(err);
        }
    }
    /**
     * Issues a non-streaming request via the auth client, adding encryption
     * headers and `userProject` when configured.
     *
     * @throws The API error embedded in the response body, if any.
     */
    async makeRequest(reqOpts) {
        if (this.encryption) {
            reqOpts.headers = reqOpts.headers || {};
            reqOpts.headers['x-goog-encryption-algorithm'] = 'AES256';
            reqOpts.headers['x-goog-encryption-key'] = this.encryption.key.toString();
            reqOpts.headers['x-goog-encryption-key-sha256'] =
                this.encryption.hash.toString();
        }
        if (this.userProject) {
            reqOpts.params = reqOpts.params || {};
            reqOpts.params.userProject = this.userProject;
        }
        // Let gaxios know we will handle a 308 error code ourselves.
        reqOpts.validateStatus = (status) => {
            return (this.isSuccessfulResponse(status) ||
                status === RESUMABLE_INCOMPLETE_STATUS_CODE);
        };
        const combinedReqOpts = extend(true, {}, this.customRequestOptions, reqOpts);
        const res = await this.authClient.request(combinedReqOpts);
        if (res.data && res.data.error) {
            throw res.data.error;
        }
        return res;
    }
    /**
     * Issues a streaming (request-body) request. The request is aborted if
     * this stream errors while the request is in flight. All statuses are
     * accepted here; `onResponse` decides whether to retry.
     *
     * @returns The response, or `null` when `onResponse` scheduled a retry.
     */
    async makeRequestStream(reqOpts) {
        const controller = new abort_controller_1.default();
        const errorCallback = () => controller.abort();
        this.once('error', errorCallback);
        if (this.userProject) {
            reqOpts.params = reqOpts.params || {};
            reqOpts.params.userProject = this.userProject;
        }
        reqOpts.signal = controller.signal;
        reqOpts.validateStatus = () => true;
        const combinedReqOpts = extend(true, {}, this.customRequestOptions, reqOpts);
        const res = await this.authClient.request(combinedReqOpts);
        const successfulRequest = this.onResponse(res);
        this.removeListener('error', errorCallback);
        return successfulRequest ? res : null;
    }
    /**
     * Discards cached session state and opens a brand-new upload session.
     * Refuses (emits an error) if bytes have already been written, since
     * that upstream data cannot be replayed.
     */
    restart() {
        if (this.numBytesWritten) {
            let message = 'Attempting to restart an upload after unrecoverable bytes have been written from upstream. ';
            message += 'Stopping as this could result in data loss. ';
            message += 'Create a new upload object to continue.';
            this.emit('error', new RangeError(message));
            return;
        }
        this.lastChunkSent = Buffer.alloc(0);
        this.deleteConfig();
        this.createURI((err, uri) => {
            if (err) {
                return this.destroy(err);
            }
            this.set({ uri });
            this.startUploading();
            return;
        });
    }
    /** Reads one property of this upload's cached ConfigStore entry. */
    get(prop) {
        const store = this.configStore.get(this.cacheKey);
        return store && store[prop];
    }
    /** Persists properties into this upload's ConfigStore entry. */
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    set(props) {
        this.configStore.set(this.cacheKey, props);
    }
    /** Removes this upload's cached ConfigStore entry. */
    deleteConfig() {
        this.configStore.delete(this.cacheKey);
    }
    /**
     * @return {bool} is the request good?
     */
    onResponse(resp) {
        // A user-declared retryable error, a 404, or any 5xx triggers a
        // delayed retry instead of surfacing the response.
        if ((this.retryableErrorFn &&
            this.retryableErrorFn({
                code: resp.status,
                message: resp.statusText,
                name: resp.statusText,
            })) ||
            resp.status === NOT_FOUND_STATUS_CODE ||
            this.isServerErrorResponse(resp.status)) {
            this.attemptDelayedRetry(resp);
            return false;
        }
        this.emit('response', resp);
        return true;
    }
    /**
     * Schedules a retry of the previous request, or destroys the stream if
     * the retry budget is exhausted.
     *
     * @param resp GaxiosResponse object from previous attempt
     */
    attemptDelayedRetry(resp) {
        if (this.numRetries < this.retryLimit) {
            // A 404 before any chunk was read can be retried immediately —
            // nothing needs to be rewound.
            if (resp.status === NOT_FOUND_STATUS_CODE &&
                this.numChunksReadInRequest === 0) {
                this.startUploading();
            }
            else {
                const retryDelay = this.getRetryDelay();
                if (retryDelay <= 0) {
                    this.destroy(new Error(`Retry total time limit exceeded - ${resp.data}`));
                    return;
                }
                // Unshift the most recent chunk back in case it's needed for the next
                // request.
                this.numBytesWritten -= this.lastChunkSent.byteLength;
                this.unshiftChunkBuffer(this.lastChunkSent);
                this.lastChunkSent = Buffer.alloc(0);
                // We don't know how much data has been received by the server.
                // `continueUploading` will recheck the offset via `getAndSetOffset`.
                // If `offset` < `numberBytesReceived` then we will raise a RangeError
                // as we've streamed too much data that has been missed - this should
                // not be the case for multi-chunk uploads as `lastChunkSent` is the
                // body of the entire request.
                this.offset = undefined;
                setTimeout(this.continueUploading.bind(this), retryDelay);
            }
            this.numRetries++;
        }
        else {
            this.destroy(new Error('Retry limit exceeded - ' + resp.data));
        }
    }
    /**
     * Computes an exponential back-off with jitter, capped by both the
     * per-attempt max delay and the remaining total retry time budget.
     *
     * @returns {number} the amount of time to wait before retrying the request
     */
    getRetryDelay() {
        const randomMs = Math.round(Math.random() * 1000);
        const waitTime = Math.pow(this.retryDelayMultiplier, this.numRetries) * 1000 + randomMs;
        const maxAllowableDelayMs = this.maxRetryTotalTimeout * 1000 - (Date.now() - this.timeOfFirstRequest);
        const maxRetryDelayMs = this.maxRetryDelay * 1000;
        // A non-positive result means the total time budget is exhausted.
        return Math.min(waitTime, maxRetryDelayMs, maxAllowableDelayMs);
    }
    /*
     * Prepare user-defined API endpoint for compatibility with our API.
     */
    sanitizeEndpoint(url) {
        if (!exports.PROTOCOL_REGEX.test(url)) {
            url = `https://${url}`;
        }
        return url.replace(/\/+$/, ''); // Remove trailing slashes
    }
    /**
     * Check if a given status code is 2xx
     *
     * @param status The status code to check
     * @returns if the status is 2xx
     */
    isSuccessfulResponse(status) {
        return status >= 200 && status < 300;
    }
    /**
     * Check if a given status code is 5xx
     *
     * @param status The status code to check
     * @returns if the status is 5xx
     */
    isServerErrorResponse(status) {
        return status >= 500 && status < 600;
    }
}
exports.Upload = Upload;
/**
 * Convenience factory for a resumable `Upload` stream.
 *
 * @param cfg The upload configuration (`bucket` and `file` are required).
 * @returns A newly constructed `Upload` instance.
 */
function upload(cfg) {
    const instance = new Upload(cfg);
    return instance;
}
exports.upload = upload;
/**
 * Creates a resumable-upload session URI without keeping the upload object.
 *
 * @param cfg The upload configuration (`bucket` and `file` are required).
 * @param callback Optional node-style callback receiving the URI; when
 *     omitted, a Promise resolving to the URI is returned instead.
 */
function createURI(cfg, callback) {
    const up = new Upload(cfg);
    const pendingURI = up.createURI();
    if (callback) {
        pendingURI.then((uri) => callback(null, uri), callback);
        return;
    }
    return pendingURI;
}
exports.createURI = createURI;
776//# sourceMappingURL=index.js.map
\No newline at end of file