// S3 Upload helper: multipart upload with single-PutObject fallback,
// built on @aws-sdk/client-s3.
import { AbortMultipartUploadCommand, ChecksumAlgorithm, CompleteMultipartUploadCommand, CreateMultipartUploadCommand, PutObjectCommand, PutObjectTaggingCommand, UploadPartCommand, } from "@aws-sdk/client-s3";
import { AbortController } from "@smithy/abort-controller";
import { getEndpointFromInstructions, toEndpointV1, } from "@smithy/middleware-endpoint";
import { extendedEncodeURIComponent } from "@smithy/smithy-client";
import { EventEmitter } from "events";
import { byteLength } from "./bytelength";
import { getChunk } from "./chunker";
export class Upload extends EventEmitter {
|
||||
static MIN_PART_SIZE = 1024 * 1024 * 5;
|
||||
MAX_PARTS = 10_000;
|
||||
queueSize = 4;
|
||||
partSize = Upload.MIN_PART_SIZE;
|
||||
leavePartsOnError = false;
|
||||
tags = [];
|
||||
client;
|
||||
params;
|
||||
totalBytes;
|
||||
bytesUploadedSoFar;
|
||||
abortController;
|
||||
concurrentUploaders = [];
|
||||
createMultiPartPromise;
|
||||
abortMultipartUploadCommand = null;
|
||||
uploadedParts = [];
|
||||
uploadEnqueuedPartsCount = 0;
|
||||
uploadId;
|
||||
uploadEvent;
|
||||
isMultiPart = true;
|
||||
singleUploadResult;
|
||||
sent = false;
|
||||
constructor(options) {
|
||||
super();
|
||||
this.queueSize = options.queueSize || this.queueSize;
|
||||
this.partSize = options.partSize || this.partSize;
|
||||
this.leavePartsOnError = options.leavePartsOnError || this.leavePartsOnError;
|
||||
this.tags = options.tags || this.tags;
|
||||
this.client = options.client;
|
||||
this.params = options.params;
|
||||
this.__validateInput();
|
||||
this.totalBytes = byteLength(this.params.Body);
|
||||
this.bytesUploadedSoFar = 0;
|
||||
this.abortController = options.abortController ?? new AbortController();
|
||||
}
|
||||
async abort() {
|
||||
this.abortController.abort();
|
||||
}
|
||||
async done() {
|
||||
if (this.sent) {
|
||||
throw new Error("@aws-sdk/lib-storage: this instance of Upload has already executed .done(). Create a new instance.");
|
||||
}
|
||||
this.sent = true;
|
||||
return await Promise.race([this.__doMultipartUpload(), this.__abortTimeout(this.abortController.signal)]);
|
||||
}
|
||||
on(event, listener) {
|
||||
this.uploadEvent = event;
|
||||
return super.on(event, listener);
|
||||
}
|
||||
async __uploadUsingPut(dataPart) {
|
||||
this.isMultiPart = false;
|
||||
const params = { ...this.params, Body: dataPart.data };
|
||||
const clientConfig = this.client.config;
|
||||
const requestHandler = clientConfig.requestHandler;
|
||||
const eventEmitter = requestHandler instanceof EventEmitter ? requestHandler : null;
|
||||
const uploadEventListener = (event) => {
|
||||
this.bytesUploadedSoFar = event.loaded;
|
||||
this.totalBytes = event.total;
|
||||
this.__notifyProgress({
|
||||
loaded: this.bytesUploadedSoFar,
|
||||
total: this.totalBytes,
|
||||
part: dataPart.partNumber,
|
||||
Key: this.params.Key,
|
||||
Bucket: this.params.Bucket,
|
||||
});
|
||||
};
|
||||
if (eventEmitter !== null) {
|
||||
eventEmitter.on("xhr.upload.progress", uploadEventListener);
|
||||
}
|
||||
const resolved = await Promise.all([this.client.send(new PutObjectCommand(params)), clientConfig?.endpoint?.()]);
|
||||
const putResult = resolved[0];
|
||||
let endpoint = resolved[1];
|
||||
if (!endpoint) {
|
||||
endpoint = toEndpointV1(await getEndpointFromInstructions(params, PutObjectCommand, {
|
||||
...clientConfig,
|
||||
}));
|
||||
}
|
||||
if (!endpoint) {
|
||||
throw new Error('Could not resolve endpoint from S3 "client.config.endpoint()" nor EndpointsV2.');
|
||||
}
|
||||
if (eventEmitter !== null) {
|
||||
eventEmitter.off("xhr.upload.progress", uploadEventListener);
|
||||
}
|
||||
const locationKey = this.params
|
||||
.Key.split("/")
|
||||
.map((segment) => extendedEncodeURIComponent(segment))
|
||||
.join("/");
|
||||
const locationBucket = extendedEncodeURIComponent(this.params.Bucket);
|
||||
const Location = (() => {
|
||||
const endpointHostnameIncludesBucket = endpoint.hostname.startsWith(`${locationBucket}.`);
|
||||
const forcePathStyle = this.client.config.forcePathStyle;
|
||||
const optionalPort = endpoint.port ? `:${endpoint.port}` : ``;
|
||||
if (forcePathStyle) {
|
||||
return `${endpoint.protocol}//${endpoint.hostname}${optionalPort}/${locationBucket}/${locationKey}`;
|
||||
}
|
||||
if (endpointHostnameIncludesBucket) {
|
||||
return `${endpoint.protocol}//${endpoint.hostname}${optionalPort}/${locationKey}`;
|
||||
}
|
||||
return `${endpoint.protocol}//${locationBucket}.${endpoint.hostname}${optionalPort}/${locationKey}`;
|
||||
})();
|
||||
this.singleUploadResult = {
|
||||
...putResult,
|
||||
Bucket: this.params.Bucket,
|
||||
Key: this.params.Key,
|
||||
Location,
|
||||
};
|
||||
const totalSize = byteLength(dataPart.data);
|
||||
this.__notifyProgress({
|
||||
loaded: totalSize,
|
||||
total: totalSize,
|
||||
part: 1,
|
||||
Key: this.params.Key,
|
||||
Bucket: this.params.Bucket,
|
||||
});
|
||||
}
|
||||
async __createMultipartUpload() {
|
||||
const requestChecksumCalculation = await this.client.config.requestChecksumCalculation();
|
||||
if (!this.createMultiPartPromise) {
|
||||
const createCommandParams = { ...this.params, Body: undefined };
|
||||
if (requestChecksumCalculation === "WHEN_SUPPORTED") {
|
||||
createCommandParams.ChecksumAlgorithm = this.params.ChecksumAlgorithm || ChecksumAlgorithm.CRC32;
|
||||
}
|
||||
this.createMultiPartPromise = this.client
|
||||
.send(new CreateMultipartUploadCommand(createCommandParams))
|
||||
.then((createMpuResponse) => {
|
||||
this.abortMultipartUploadCommand = new AbortMultipartUploadCommand({
|
||||
Bucket: this.params.Bucket,
|
||||
Key: this.params.Key,
|
||||
UploadId: createMpuResponse.UploadId,
|
||||
});
|
||||
return createMpuResponse;
|
||||
});
|
||||
}
|
||||
return this.createMultiPartPromise;
|
||||
}
|
||||
async __doConcurrentUpload(dataFeeder) {
|
||||
for await (const dataPart of dataFeeder) {
|
||||
if (this.uploadEnqueuedPartsCount > this.MAX_PARTS) {
|
||||
throw new Error(`Exceeded ${this.MAX_PARTS} parts in multipart upload to Bucket: ${this.params.Bucket} Key: ${this.params.Key}.`);
|
||||
}
|
||||
if (this.abortController.signal.aborted) {
|
||||
return;
|
||||
}
|
||||
if (dataPart.partNumber === 1 && dataPart.lastPart) {
|
||||
return await this.__uploadUsingPut(dataPart);
|
||||
}
|
||||
if (!this.uploadId) {
|
||||
const { UploadId } = await this.__createMultipartUpload();
|
||||
this.uploadId = UploadId;
|
||||
if (this.abortController.signal.aborted) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
const partSize = byteLength(dataPart.data) || 0;
|
||||
const requestHandler = this.client.config.requestHandler;
|
||||
const eventEmitter = requestHandler instanceof EventEmitter ? requestHandler : null;
|
||||
let lastSeenBytes = 0;
|
||||
const uploadEventListener = (event, request) => {
|
||||
const requestPartSize = Number(request.query["partNumber"]) || -1;
|
||||
if (requestPartSize !== dataPart.partNumber) {
|
||||
return;
|
||||
}
|
||||
if (event.total && partSize) {
|
||||
this.bytesUploadedSoFar += event.loaded - lastSeenBytes;
|
||||
lastSeenBytes = event.loaded;
|
||||
}
|
||||
this.__notifyProgress({
|
||||
loaded: this.bytesUploadedSoFar,
|
||||
total: this.totalBytes,
|
||||
part: dataPart.partNumber,
|
||||
Key: this.params.Key,
|
||||
Bucket: this.params.Bucket,
|
||||
});
|
||||
};
|
||||
if (eventEmitter !== null) {
|
||||
eventEmitter.on("xhr.upload.progress", uploadEventListener);
|
||||
}
|
||||
this.uploadEnqueuedPartsCount += 1;
|
||||
const partResult = await this.client.send(new UploadPartCommand({
|
||||
...this.params,
|
||||
ContentLength: undefined,
|
||||
UploadId: this.uploadId,
|
||||
Body: dataPart.data,
|
||||
PartNumber: dataPart.partNumber,
|
||||
}));
|
||||
if (eventEmitter !== null) {
|
||||
eventEmitter.off("xhr.upload.progress", uploadEventListener);
|
||||
}
|
||||
if (this.abortController.signal.aborted) {
|
||||
return;
|
||||
}
|
||||
if (!partResult.ETag) {
|
||||
throw new Error(`Part ${dataPart.partNumber} is missing ETag in UploadPart response. Missing Bucket CORS configuration for ETag header?`);
|
||||
}
|
||||
this.uploadedParts.push({
|
||||
PartNumber: dataPart.partNumber,
|
||||
ETag: partResult.ETag,
|
||||
...(partResult.ChecksumCRC32 && { ChecksumCRC32: partResult.ChecksumCRC32 }),
|
||||
...(partResult.ChecksumCRC32C && { ChecksumCRC32C: partResult.ChecksumCRC32C }),
|
||||
...(partResult.ChecksumSHA1 && { ChecksumSHA1: partResult.ChecksumSHA1 }),
|
||||
...(partResult.ChecksumSHA256 && { ChecksumSHA256: partResult.ChecksumSHA256 }),
|
||||
});
|
||||
if (eventEmitter === null) {
|
||||
this.bytesUploadedSoFar += partSize;
|
||||
}
|
||||
this.__notifyProgress({
|
||||
loaded: this.bytesUploadedSoFar,
|
||||
total: this.totalBytes,
|
||||
part: dataPart.partNumber,
|
||||
Key: this.params.Key,
|
||||
Bucket: this.params.Bucket,
|
||||
});
|
||||
}
|
||||
}
|
||||
async __doMultipartUpload() {
|
||||
const dataFeeder = getChunk(this.params.Body, this.partSize);
|
||||
const concurrentUploaderFailures = [];
|
||||
for (let index = 0; index < this.queueSize; index++) {
|
||||
const currentUpload = this.__doConcurrentUpload(dataFeeder).catch((err) => {
|
||||
concurrentUploaderFailures.push(err);
|
||||
});
|
||||
this.concurrentUploaders.push(currentUpload);
|
||||
}
|
||||
await Promise.all(this.concurrentUploaders);
|
||||
if (concurrentUploaderFailures.length >= 1) {
|
||||
await this.markUploadAsAborted();
|
||||
throw concurrentUploaderFailures[0];
|
||||
}
|
||||
if (this.abortController.signal.aborted) {
|
||||
await this.markUploadAsAborted();
|
||||
throw Object.assign(new Error("Upload aborted."), { name: "AbortError" });
|
||||
}
|
||||
let result;
|
||||
if (this.isMultiPart) {
|
||||
this.uploadedParts.sort((a, b) => a.PartNumber - b.PartNumber);
|
||||
const uploadCompleteParams = {
|
||||
...this.params,
|
||||
Body: undefined,
|
||||
UploadId: this.uploadId,
|
||||
MultipartUpload: {
|
||||
Parts: this.uploadedParts,
|
||||
},
|
||||
};
|
||||
result = await this.client.send(new CompleteMultipartUploadCommand(uploadCompleteParams));
|
||||
if (typeof result?.Location === "string" && result.Location.includes("%2F")) {
|
||||
result.Location = result.Location.replace(/%2F/g, "/");
|
||||
}
|
||||
}
|
||||
else {
|
||||
result = this.singleUploadResult;
|
||||
}
|
||||
this.abortMultipartUploadCommand = null;
|
||||
if (this.tags.length) {
|
||||
await this.client.send(new PutObjectTaggingCommand({
|
||||
...this.params,
|
||||
Tagging: {
|
||||
TagSet: this.tags,
|
||||
},
|
||||
}));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
async markUploadAsAborted() {
|
||||
if (this.uploadId && !this.leavePartsOnError && null !== this.abortMultipartUploadCommand) {
|
||||
await this.client.send(this.abortMultipartUploadCommand);
|
||||
this.abortMultipartUploadCommand = null;
|
||||
}
|
||||
}
|
||||
__notifyProgress(progress) {
|
||||
if (this.uploadEvent) {
|
||||
this.emit(this.uploadEvent, progress);
|
||||
}
|
||||
}
|
||||
async __abortTimeout(abortSignal) {
|
||||
return new Promise((resolve, reject) => {
|
||||
abortSignal.onabort = () => {
|
||||
const abortError = new Error("Upload aborted.");
|
||||
abortError.name = "AbortError";
|
||||
reject(abortError);
|
||||
};
|
||||
});
|
||||
}
|
||||
__validateInput() {
|
||||
if (!this.params) {
|
||||
throw new Error(`InputError: Upload requires params to be passed to upload.`);
|
||||
}
|
||||
if (!this.client) {
|
||||
throw new Error(`InputError: Upload requires a AWS client to do uploads with.`);
|
||||
}
|
||||
if (this.partSize < Upload.MIN_PART_SIZE) {
|
||||
throw new Error(`EntityTooSmall: Your proposed upload partsize [${this.partSize}] is smaller than the minimum allowed size [${Upload.MIN_PART_SIZE}] (5MB)`);
|
||||
}
|
||||
if (this.queueSize < 1) {
|
||||
throw new Error(`Queue size: Must have at least one uploading queue.`);
|
||||
}
|
||||
}
|
||||
}