Skip to content

Commit

Permalink
In case of an abort, delete data and MD of object
Browse files Browse the repository at this point in the history
If completing an MPU fails after the object metadata has been stored, the abort must delete both the object MD and the object data. Some parts may already have been removed from the MPU metadata, so we check the data locations referenced by the object MD against the remaining stored parts, to avoid creating orphans.

Issue: CLDSRV-570
  • Loading branch information
KillianG committed Nov 13, 2024
1 parent c02566e commit cb95908
Showing 1 changed file with 44 additions and 13 deletions.
57 changes: 44 additions & 13 deletions lib/api/apiUtils/object/abortMultipartUpload.js
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ const locationConstraintCheck = require('../object/locationConstraintCheck');
const { standardMetadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils');
const services = require('../../../services');
const metadata = require('../../../metadata/wrapper');

function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
callback, request) {
Expand All @@ -27,7 +28,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
async.waterfall([
function checkDestBucketVal(next) {
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
(err, destinationBucket) => {
(err, destinationBucket, objectMD) => {
if (err) {
return next(err, destinationBucket);
}
Expand All @@ -41,20 +42,20 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
metadataValMPUparams.requestType =
'bucketPolicyGoAhead';
}
return next(null, destinationBucket);
return next(null, destinationBucket, objectMD);
});
},
function checkMPUval(destBucket, next) {
function checkMPUval(destBucket, objectMD, next) {
metadataValParams.log = log;
services.metadataValidateMultipart(metadataValParams,
(err, mpuBucket, mpuOverviewObj) => {
if (err) {
return next(err, destBucket);
}
return next(err, mpuBucket, mpuOverviewObj, destBucket);
return next(err, mpuBucket, mpuOverviewObj, destBucket, objectMD);
});
},
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, objectMD,
next) {
const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
Expand All @@ -70,31 +71,31 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
}
// for Azure and GCP we do not need to delete data
// for all other backends, skipDataDelete will be set to false
return next(null, mpuBucket, destBucket, skipDataDelete);
return next(null, mpuBucket, destBucket, objectMD, skipDataDelete);
});
},
function getPartLocations(mpuBucket, destBucket, skipDataDelete,
function getPartLocations(mpuBucket, destBucket, objectMD, skipDataDelete,
next) {
services.getMPUparts(mpuBucket.getName(), uploadId, log,
(err, result) => {
if (err) {
return next(err, destBucket);
}
const storedParts = result.Contents;
return next(null, mpuBucket, storedParts, destBucket,
return next(null, mpuBucket, storedParts, destBucket, objectMD,
skipDataDelete);
});
},
function deleteData(mpuBucket, storedParts, destBucket,
function deleteData(mpuBucket, storedParts, destBucket, objectMD,
skipDataDelete, next) {
if (skipDataDelete) {
return next(null, mpuBucket, storedParts, destBucket);
return next(null, mpuBucket, storedParts, destBucket, objectMD);
}
// The locations were sent to metadata as an array
// under partLocations. Pull the partLocations.
let locations = storedParts.map(item => item.value.partLocations);
if (locations.length === 0) {
return next(null, mpuBucket, storedParts, destBucket);
return next(null, mpuBucket, storedParts, destBucket, objectMD);
}
// flatten the array
locations = [].concat(...locations);
Expand All @@ -105,9 +106,39 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
}
cb();
});
}, () => next(null, mpuBucket, storedParts, destBucket));
}, () => next(null, mpuBucket, storedParts, destBucket, objectMD));
},
function deleteMetadata(mpuBucket, storedParts, destBucket, next) {
function deleteObjectData(mpuBucket, storedParts, destBucket, objectMD, next) {
// Filtering parts that has already been delete by the previous step
let partLocations = storedParts.map(item => item.value.partLocations);
partLocations = [].concat(...partLocations);
partLocations = new Set(partLocations.map(loc => loc.key));
const objectLocationLeft = objectMD.location.filter(loc => !partLocations.has(loc.key));

return async.eachLimit(objectLocationLeft, 5, (loc, cb) => {
data.delete(loc, log, err => {
if (err) {
log.fatal('delete object data failed', { err });
}
cb();
});
}, () => next(null, mpuBucket, storedParts, destBucket, objectMD));
},
function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD, next) {
if (metadataValMPUparams.uploadId === objectMD.uploadId) {
// In case there has been an error during cleanup after a complete MPU
// (e.g. failure to delete MPU MD in shadow bucket),
// we need to ensure that the MPU metadata is deleted.

metadata.deleteObjectMD(bucketName, objectKey, undefined, log, err => {
if (err) {
log.error('error deleting object metadata', { error: err });
}
return next(err, mpuBucket, storedParts, destBucket);
});
}
},
function deleteShadowObjectMetadata(mpuBucket, storedParts, destBucket, next) {
let splitter = constants.splitter;
// BACKWARD: Remove to remove the old splitter
if (mpuBucket.getMdBucketModelVersion() < 2) {
Expand Down

0 comments on commit cb95908

Please sign in to comment.