diff --git a/lib/api/apiUtils/object/abortMultipartUpload.js b/lib/api/apiUtils/object/abortMultipartUpload.js
index ccced153d9..97eda8ff4e 100644
--- a/lib/api/apiUtils/object/abortMultipartUpload.js
+++ b/lib/api/apiUtils/object/abortMultipartUpload.js
@@ -6,6 +6,7 @@
 const locationConstraintCheck = require('../object/locationConstraintCheck');
 const { standardMetadataValidateBucketAndObj } = require('../../../metadata/metadataUtils');
 const services = require('../../../services');
+const metadata = require('../../../metadata/wrapper');
 
 function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
     callback, request) {
@@ -17,6 +18,8 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
         preciseRequestType: request.apiMethods || 'multipartDelete',
         request,
     };
+
+    log.debug('processing request', { method: 'abortMultipartUpload' });
     // For validating the request at the destinationBucket level
     // params are the same as validating at the MPU level
     // but the requestType is the more general 'objectDelete'
@@ -27,8 +30,9 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
     async.waterfall([
         function checkDestBucketVal(next) {
             standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
-                (err, destinationBucket) => {
+                (err, destinationBucket, objectMD) => {
                     if (err) {
+                        log.error('error validating request', { error: err });
                         return next(err, destinationBucket);
                     }
                     if (destinationBucket.policies) {
@@ -41,20 +45,21 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                         metadataValMPUparams.requestType =
                             'bucketPolicyGoAhead';
                     }
-                    return next(null, destinationBucket);
+                    return next(null, destinationBucket, objectMD);
                 });
         },
-        function checkMPUval(destBucket, next) {
+        function checkMPUval(destBucket, objectMD, next) {
             metadataValParams.log = log;
             services.metadataValidateMultipart(metadataValParams,
                 (err, mpuBucket, mpuOverviewObj) => {
                     if (err) {
+                        log.error('error validating multipart', { error: err });
                         return next(err, destBucket);
                     }
-                    return next(err, mpuBucket, mpuOverviewObj, destBucket);
+                    return next(err, mpuBucket, mpuOverviewObj, destBucket, objectMD);
                 });
         },
-        function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
+        function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, objectMD,
             next) {
             const location = mpuOverviewObj.controllingLocationConstraint;
             const originalIdentityAuthzResults = request.actionImplicitDenies;
@@ -66,38 +71,66 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                 // eslint-disable-next-line no-param-reassign
                 request.actionImplicitDenies = originalIdentityAuthzResults;
                 if (err) {
+                    log.error('error aborting MPU', { error: err });
                     return next(err, destBucket);
                 }
                 // for Azure and GCP we do not need to delete data
                 // for all other backends, skipDataDelete will be set to false
-                return next(null, mpuBucket, destBucket, skipDataDelete);
+                return next(null, mpuBucket, destBucket, objectMD, skipDataDelete);
             });
         },
-        function getPartLocations(mpuBucket, destBucket, skipDataDelete,
+        function getPartLocations(mpuBucket, destBucket, objectMD, skipDataDelete,
             next) {
             services.getMPUparts(mpuBucket.getName(), uploadId, log,
                 (err, result) => {
                     if (err) {
+                        log.error('error getting parts', { error: err });
                         return next(err, destBucket);
                     }
                     const storedParts = result.Contents;
-                    return next(null, mpuBucket, storedParts, destBucket,
+                    return next(null, mpuBucket, storedParts, destBucket, objectMD,
                         skipDataDelete);
                 });
         },
-        function deleteData(mpuBucket, storedParts, destBucket,
+        function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD, skipDataDelete, next) {
+            if (!objectMD || metadataValMPUparams.uploadId !== objectMD.uploadId) {
+                return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete);
+            }
+            // In case there has been an error during cleanup after a complete MPU
+            // (e.g. failure to delete MPU MD in shadow bucket),
+            // we need to ensure that the MPU metadata is deleted.
+            log.info('Object has existing metadata, deleting them', {
+                method: 'abortMultipartUpload',
+                bucketName,
+                objectKey,
+                uploadId,
+                versionId: objectMD.versionId,
+            });
+            return metadata.deleteObjectMD(bucketName, objectKey, { versionId: objectMD.versionId }, log, err => {
+                if (err) {
+                    log.error('error deleting object metadata', { error: err });
+                }
+                return next(err, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete);
+            });
+        },
+        function deleteData(mpuBucket, storedParts, destBucket, objectMD,
             skipDataDelete, next) {
             if (skipDataDelete) {
                 return next(null, mpuBucket, storedParts, destBucket);
             }
             // The locations were sent to metadata as an array
             // under partLocations. Pull the partLocations.
-            let locations = storedParts.map(item => item.value.partLocations);
+            const locations = storedParts.flatMap(item => item.value.partLocations);
             if (locations.length === 0) {
                 return next(null, mpuBucket, storedParts, destBucket);
             }
-            // flatten the array
-            locations = [].concat(...locations);
+
+            if (objectMD?.location) {
+                const existingLocations = new Set(locations.map(loc => loc.key));
+                const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key));
+                locations.push(...remainingObjectLocations);
+            }
+
             return async.eachLimit(locations, 5, (loc, cb) => {
                 data.delete(loc, log, err => {
                     if (err) {
@@ -107,7 +140,7 @@
                 });
             }, () => next(null, mpuBucket, storedParts, destBucket));
         },
-        function deleteMetadata(mpuBucket, storedParts, destBucket, next) {
+        function deleteShadowObjectMetadata(mpuBucket, storedParts, destBucket, next) {
             let splitter = constants.splitter;
             // BACKWARD: Remove to remove the old splitter
             if (mpuBucket.getMdBucketModelVersion() < 2) {
diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js
index 841e9839d1..5edfe48c9f 100644
--- a/lib/api/completeMultipartUpload.js
+++ b/lib/api/completeMultipartUpload.js
@@ -152,6 +152,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
             return services.metadataValidateMultipart(metadataValParams,
                 (err, mpuBucket, mpuOverview, storedMetadata) => {
                     if (err) {
+                        log.error('error validating request', { error: err });
                         return next(err, destBucket);
                     }
                     return next(null, destBucket, objMD, mpuBucket,
@@ -172,6 +173,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
             if (request.post) {
                 return parseXml(request.post, (err, jsonList) => {
                     if (err) {
+                        log.error('error parsing XML', { error: err });
                         return next(err, destBucket);
                     }
                     return next(null, destBucket, objMD, mpuBucket,
@@ -190,6 +192,12 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                 storedMetadata,
             }, log, err => {
                 if (err) {
+                    log.error('error marking MPU object for completion', {
+                        bucketName: mpuBucket.getName(),
+                        objectKey,
+                        uploadId,
+                        error: err,
+                    });
                     return next(err);
                 }
                 return next(null, destBucket, objMD, mpuBucket,
@@ -201,6 +209,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
             return services.getMPUparts(mpuBucket.getName(), uploadId, log,
                 (err, result) => {
                     if (err) {
+                        log.error('error getting parts', { error: err });
                         return next(err, destBucket);
                     }
                     const storedParts = result.Contents;
@@ -222,6 +231,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                 // eslint-disable-next-line no-param-reassign
                 request.actionImplicitDenies = originalIdentityImpDenies;
                 if (err) {
+                    log.error('error completing MPU externally', { error: err });
                     return next(err, destBucket);
                 }
                 // if mpu not handled externally, completeObjData will be null
@@ -452,6 +462,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                     dataLocations, pseudoCipherBundle, metaStoreParams,
                     (err, res) => {
                         if (err) {
+                            log.error('error storing object metadata', { error: err });
                             return next(err, destinationBucket);
                         }
 
diff --git a/tests/unit/api/multipartUpload.js b/tests/unit/api/multipartUpload.js
index 5e162a7252..4d4d37e0d1 100644
--- a/tests/unit/api/multipartUpload.js
+++ b/tests/unit/api/multipartUpload.js
@@ -142,7 +142,6 @@ function _createCompleteMpuRequest(uploadId, parts) {
     };
 }
 
-
 describe('Multipart Upload API', () => {
     beforeEach(() => {
         cleanup();
@@ -1917,6 +1916,159 @@ describe('Multipart Upload API', () => {
             done();
         });
     });
+
+    it('should abort an MPU and delete its MD if it has been created by a failed complete before', done => {
+        const delMeta = metadataBackend.deleteObject;
+        metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError);
+        const partBody = Buffer.from('I am a part\n', 'utf8');
+        initiateRequest.headers['x-amz-meta-stuff'] =
+            'I am some user metadata';
+        async.waterfall([
+            next => bucketPut(authInfo, bucketPutRequest, log, next),
+            (corsHeaders, next) => initiateMultipartUpload(authInfo,
+                initiateRequest, log, next),
+            (result, corsHeaders, next) => parseString(result, next),
+        ],
+        (err, json) => {
+            assert.ifError(err);
+            const testUploadId =
+                json.InitiateMultipartUploadResult.UploadId[0];
+            const md5Hash = crypto.createHash('md5').update(partBody);
+            const calculatedHash = md5Hash.digest('hex');
+            const partRequest = new DummyRequest({
+                bucketName,
+                namespace,
+                objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com` },
+                url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`,
+                query: {
+                    partNumber: '1',
+                    uploadId: testUploadId,
+                },
+                calculatedHash,
+            }, partBody);
+            objectPutPart(authInfo, partRequest, undefined, log, () => {
+                const completeBody = '<CompleteMultipartUpload>' +
+                    '<Part>' +
+                    '<PartNumber>1</PartNumber>' +
+                    `<ETag>"${calculatedHash}"</ETag>` +
+                    '</Part>' +
+                    '</CompleteMultipartUpload>';
+                const completeRequest = {
+                    bucketName,
+                    namespace,
+                    objectKey,
+                    parsedHost: 's3.amazonaws.com',
+                    url: `/${objectKey}?uploadId=${testUploadId}`,
+                    headers: { host: `${bucketName}.s3.amazonaws.com` },
+                    query: { uploadId: testUploadId },
+                    post: completeBody,
+                    actionImplicitDenies: false,
+                };
+                completeMultipartUpload(authInfo,
+                    completeRequest, log, err => {
+                        assert(err.is.InternalError);
+                        const MD = metadata.keyMaps.get(bucketName)
+                            .get(objectKey);
+                        assert(MD);
+                        assert.strictEqual(MD['x-amz-meta-stuff'],
+                            'I am some user metadata');
+                        assert.strictEqual(MD.uploadId, testUploadId);
+
+                        metadataBackend.deleteObject = delMeta;
+                        const deleteRequest = {
+                            bucketName,
+                            namespace,
+                            objectKey,
+                            url: `/${objectKey}?uploadId=${testUploadId}`,
+                            headers: { host: `${bucketName}.s3.amazonaws.com` },
+                            query: { uploadId: testUploadId },
+                            actionImplicitDenies: false,
+                        };
+                        assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2);
+                        multipartDelete(authInfo, deleteRequest, log, err => {
+                            assert.strictEqual(err, null);
+                            assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0);
+                            done();
+                        });
+                    });
+            });
+        });
+    });
+
+    it('should complete an MPU and promote its MD if it has been created by a failed complete before', done => {
+        const delMeta = metadataBackend.deleteObject;
+        metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError);
+        const partBody = Buffer.from('I am a part\n', 'utf8');
+        initiateRequest.headers['x-amz-meta-stuff'] =
+            'I am some user metadata';
+        async.waterfall([
+            next => bucketPut(authInfo, bucketPutRequest, log, next),
+            (corsHeaders, next) => initiateMultipartUpload(authInfo,
+                initiateRequest, log, next),
+            (result, corsHeaders, next) => parseString(result, next),
+        ],
+        (err, json) => {
+            assert.ifError(err);
+            const testUploadId =
+                json.InitiateMultipartUploadResult.UploadId[0];
+            const md5Hash = crypto.createHash('md5').update(partBody);
+            const calculatedHash = md5Hash.digest('hex');
+            const partRequest = new DummyRequest({
+                bucketName,
+                namespace,
+                objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com` },
+                url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`,
+                query: {
+                    partNumber: '1',
+                    uploadId: testUploadId,
+                },
+                calculatedHash,
+            }, partBody);
+            objectPutPart(authInfo, partRequest, undefined, log, () => {
+                const completeBody = '<CompleteMultipartUpload>' +
+                    '<Part>' +
+                    '<PartNumber>1</PartNumber>' +
+                    `<ETag>"${calculatedHash}"</ETag>` +
+                    '</Part>' +
+                    '</CompleteMultipartUpload>';
+                const completeRequest = {
+                    bucketName,
+                    namespace,
+                    objectKey,
+                    parsedHost: 's3.amazonaws.com',
+                    url: `/${objectKey}?uploadId=${testUploadId}`,
+                    headers: { host: `${bucketName}.s3.amazonaws.com` },
+                    query: { uploadId: testUploadId },
+                    post: completeBody,
+                    actionImplicitDenies: false,
+                };
+                completeMultipartUpload(authInfo,
+                    completeRequest, log, err => {
+                        assert(err.is.InternalError);
+                        const MD = metadata.keyMaps.get(bucketName).get(objectKey);
+                        assert(MD);
+                        assert.strictEqual(MD['x-amz-meta-stuff'],
+                            'I am some user metadata');
+                        assert.strictEqual(MD.uploadId, testUploadId);
+                        metadataBackend.deleteObject = delMeta;
+                        assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2);
+                        completeMultipartUpload(authInfo,
+                            completeRequest, log, err => {
+                                assert.ifError(err);
+                                const MD = metadata.keyMaps.get(bucketName)
+                                    .get(objectKey);
+                                assert(MD);
+                                assert.strictEqual(MD['x-amz-meta-stuff'],
+                                    'I am some user metadata');
+                                assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0);
+                                done();
+                            });
+                    });
+            });
+        });
+    });
 });
 
 describe('complete mpu with versioning', () => {
@@ -2150,6 +2302,85 @@ describe('complete mpu with versioning', () => {
             done();
         });
     });
+
+    it('should complete an MPU and promote its MD if it has been created by a failed complete before ' +
+        'without creating a new version', done => {
+        const delMeta = metadataBackend.deleteObject;
+        metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError);
+        const partBody = Buffer.from('I am a part\n', 'utf8');
+        initiateRequest.headers['x-amz-meta-stuff'] =
+            'I am some user metadata';
+        async.waterfall([
+            next => bucketPutVersioning(authInfo,
+                enableVersioningRequest, log, err => next(err)),
+            next => initiateMultipartUpload(authInfo,
+                initiateRequest, log, next),
+            (result, corsHeaders, next) => parseString(result, next),
+        ],
+        (err, json) => {
+            assert.ifError(err);
+            const testUploadId =
+                json.InitiateMultipartUploadResult.UploadId[0];
+            const md5Hash = crypto.createHash('md5').update(partBody);
+            const calculatedHash = md5Hash.digest('hex');
+            const partRequest = new DummyRequest({
+                bucketName,
+                namespace,
+                objectKey,
+                headers: { host: `${bucketName}.s3.amazonaws.com` },
+                url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`,
+                query: {
+                    partNumber: '1',
+                    uploadId: testUploadId,
+                },
+                calculatedHash,
+            }, partBody);
+            objectPutPart(authInfo, partRequest, undefined, log, () => {
+                const completeBody = '<CompleteMultipartUpload>' +
+                    '<Part>' +
+                    '<PartNumber>1</PartNumber>' +
+                    `<ETag>"${calculatedHash}"</ETag>` +
+                    '</Part>' +
+                    '</CompleteMultipartUpload>';
+                const completeRequest = {
+                    bucketName,
+                    namespace,
+                    objectKey,
+                    parsedHost: 's3.amazonaws.com',
+                    url: `/${objectKey}?uploadId=${testUploadId}`,
+                    headers: { host: `${bucketName}.s3.amazonaws.com` },
+                    query: { uploadId: testUploadId },
+                    post: completeBody,
+                    actionImplicitDenies: false,
+                };
+                completeMultipartUpload(authInfo,
+                    completeRequest, log, err => {
+                        assert(err.is.InternalError);
+                        const MD = metadata.keyMaps.get(bucketName)
+                            .get(objectKey);
+                        assert(MD);
+                        const firstVersionId = MD.versionId;
+                        assert.strictEqual(MD['x-amz-meta-stuff'],
+                            'I am some user metadata');
+                        assert.strictEqual(MD.uploadId, testUploadId);
+                        metadataBackend.deleteObject = delMeta;
+                        assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2);
+                        completeMultipartUpload(authInfo,
+                            completeRequest, log, err => {
+                                assert.ifError(err);
+                                const MD = metadata.keyMaps.get(bucketName)
+                                    .get(objectKey);
+                                assert(MD);
+                                assert.strictEqual(MD.versionId, firstVersionId);
+                                assert.strictEqual(MD['x-amz-meta-stuff'],
+                                    'I am some user metadata');
+                                assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0);
+                                done();
+                            });
+                    });
+            });
+        });
+    });
 });
 
 describe('multipart upload with object lock', () => {
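
Reviewer note: the cleanup invariant introduced in deleteData above (merge the aborted object's committed locations into the stored part locations, deduplicated by data key) can be checked in isolation. The snippet below is a minimal standalone sketch of that merge, not code from the patch; the mergeLocations helper name and the sample keys are hypothetical, for illustration only.

    // Minimal sketch of the dedupe-by-key merge performed in deleteData.
    // `mergeLocations` is a hypothetical name, not part of the patch.
    function mergeLocations(partLocations, objectMDLocations) {
        const seen = new Set(partLocations.map(loc => loc.key));
        // Keep only object MD locations whose data key is not already
        // slated for deletion through the shadow bucket's part list.
        const remaining = (objectMDLocations || []).filter(loc => !seen.has(loc.key));
        return partLocations.concat(remaining);
    }

    // Example: the object MD left behind by a failed complete points at 'k3',
    // which no longer appears among the stored parts; without the merge it
    // would be orphaned when the MPU is aborted.
    const fromParts = [{ key: 'k1' }, { key: 'k2' }];
    const fromObjectMD = [{ key: 'k2' }, { key: 'k3' }];
    console.log(mergeLocations(fromParts, fromObjectMD).map(loc => loc.key));
    // => [ 'k1', 'k2', 'k3' ]

Deduplicating by key appears to be why the patch filters objectMD.location against the part locations: a data key present in both lists would otherwise be deleted twice during the abort.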