diff --git a/lib/api/apiUtils/object/abortMultipartUpload.js b/lib/api/apiUtils/object/abortMultipartUpload.js index ccced153d9..6fdb540df3 100644 --- a/lib/api/apiUtils/object/abortMultipartUpload.js +++ b/lib/api/apiUtils/object/abortMultipartUpload.js @@ -6,6 +6,7 @@ const locationConstraintCheck = require('../object/locationConstraintCheck'); const { standardMetadataValidateBucketAndObj } = require('../../../metadata/metadataUtils'); const services = require('../../../services'); +const metadata = require('../../../metadata/wrapper'); function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, callback, request) { @@ -27,7 +28,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, async.waterfall([ function checkDestBucketVal(next) { standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log, - (err, destinationBucket) => { + (err, destinationBucket, objectMD) => { if (err) { return next(err, destinationBucket); } @@ -41,20 +42,20 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, metadataValMPUparams.requestType = 'bucketPolicyGoAhead'; } - return next(null, destinationBucket); + return next(null, destinationBucket, objectMD); }); }, - function checkMPUval(destBucket, next) { + function checkMPUval(destBucket, objectMD, next) { metadataValParams.log = log; services.metadataValidateMultipart(metadataValParams, (err, mpuBucket, mpuOverviewObj) => { if (err) { return next(err, destBucket); } - return next(err, mpuBucket, mpuOverviewObj, destBucket); + return next(err, mpuBucket, mpuOverviewObj, destBucket, objectMD); }); }, - function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, + function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, objectMD, next) { const location = mpuOverviewObj.controllingLocationConstraint; const originalIdentityAuthzResults = request.actionImplicitDenies; @@ -70,10 +71,10 @@ function abortMultipartUpload(authInfo, 
bucketName, objectKey, uploadId, log, } // for Azure and GCP we do not need to delete data // for all other backends, skipDataDelete will be set to false - return next(null, mpuBucket, destBucket, skipDataDelete); + return next(null, mpuBucket, destBucket, objectMD, skipDataDelete); }); }, - function getPartLocations(mpuBucket, destBucket, skipDataDelete, + function getPartLocations(mpuBucket, destBucket, objectMD, skipDataDelete, next) { services.getMPUparts(mpuBucket.getName(), uploadId, log, (err, result) => { @@ -81,20 +82,20 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, return next(err, destBucket); } const storedParts = result.Contents; - return next(null, mpuBucket, storedParts, destBucket, + return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete); }); }, - function deleteData(mpuBucket, storedParts, destBucket, + function deleteData(mpuBucket, storedParts, destBucket, objectMD, skipDataDelete, next) { if (skipDataDelete) { - return next(null, mpuBucket, storedParts, destBucket); + return next(null, mpuBucket, storedParts, destBucket, objectMD); } // The locations were sent to metadata as an array // under partLocations. Pull the partLocations. 
             let locations = storedParts.map(item => item.value.partLocations);
             if (locations.length === 0) {
-                return next(null, mpuBucket, storedParts, destBucket);
+                return next(null, mpuBucket, storedParts, destBucket, objectMD);
             }
             // flatten the array
             locations = [].concat(...locations);
@@ -105,9 +106,47 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                 }
                 cb();
             });
-            }, () => next(null, mpuBucket, storedParts, destBucket));
+            }, () => next(null, mpuBucket, storedParts, destBucket, objectMD));
         },
-        function deleteMetadata(mpuBucket, storedParts, destBucket, next) {
+        function deleteObjectData(mpuBucket, storedParts, destBucket, objectMD, next) {
+            // Only delete leftover object data if a completed object exists at
+            // this key AND it was created by this very upload; otherwise we
+            // would destroy the data of an unrelated live object at the key.
+            if (!objectMD || objectMD.uploadId !== metadataValMPUparams.uploadId
+                || !Array.isArray(objectMD.location)) {
+                return next(null, mpuBucket, storedParts, destBucket, objectMD);
+            }
+            // Filter out part locations already deleted by the previous step
+            let partLocations = storedParts.map(item => item.value.partLocations);
+            partLocations = [].concat(...partLocations);
+            partLocations = new Set(partLocations.map(loc => loc.key));
+            const objectLocationLeft = objectMD.location.filter(loc => !partLocations.has(loc.key));
+
+            return async.eachLimit(objectLocationLeft, 5, (loc, cb) => {
+                data.delete(loc, log, err => {
+                    if (err) {
+                        log.fatal('delete object data failed', { err });
+                    }
+                    cb();
+                });
+            }, () => next(null, mpuBucket, storedParts, destBucket, objectMD));
+        },
+        function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD, next) {
+            if (!objectMD || metadataValMPUparams.uploadId !== objectMD.uploadId) {
+                // Nothing to clean up for this upload; the waterfall must
+                // still be resumed or the abort request would hang forever.
+                return next(null, mpuBucket, storedParts, destBucket);
+            }
+            // In case there has been an error during cleanup after a complete MPU
+            // (e.g. failure to delete MPU MD in shadow bucket),
+            // we need to ensure that the MPU metadata is deleted.
+            return metadata.deleteObjectMD(bucketName, objectKey, undefined, log, err => {
+                if (err) {
+                    log.error('error deleting object metadata', { error: err });
+                }
+                return next(err, mpuBucket, storedParts, destBucket);
+            });
+        },
+        function deleteShadowObjectMetadata(mpuBucket, storedParts, destBucket, next) {
             let splitter = constants.splitter;
             // BACKWARD: Remove to remove the old splitter
             if (mpuBucket.getMdBucketModelVersion() < 2) {